"Neural Networks and Deep Learning" Chapter 3 Programming Exercises


logistic_regression-exercise

'''
Generate the dataset. Just read through this part; no code to fill in.
'+' points are sampled from the Gaussian (X, Y) ~ N(3, 6, 1, 1, 0), i.e. means (3, 6), standard deviations (1, 1), correlation 0.
'o' points are sampled from the Gaussian (X, Y) ~ N(6, 3, 1, 1, 0).
'''
import tensorflow as tf
import matplotlib.pyplot as plt

from matplotlib import animation, rc
from IPython.display import HTML
import matplotlib.cm as cm
import numpy as np
%matplotlib inline

dot_num = 100
x_p = np.random.normal(3., 1, dot_num)
y_p = np.random.normal(6., 1, dot_num)
y = np.ones(dot_num)
C1 = np.array([x_p, y_p, y]).T  # each row: (x, y, label=1)

x_n = np.random.normal(6., 1, dot_num)
y_n = np.random.normal(3., 1, dot_num)
y = np.zeros(dot_num)
C2 = np.array([x_n, y_n, y]).T  # each row: (x, y, label=0)

plt.scatter(C1[:, 0], C1[:, 1], c='b', marker='+')
plt.scatter(C2[:, 0], C2[:, 1], c='g', marker='o')

data_set = np.concatenate((C1, C2), axis=0)
np.random.shuffle(data_set)
'''
Build the model
Define the model class, the loss function, and the one-step gradient descent function.

Fill-in 1: implement the sigmoid cross-entropy loss (without using tf's built-in loss functions)
'''
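For reference, Fill-in 1 asks for the standard binary cross-entropy averaged over the N samples; a small epsilon is added inside each log so it stays finite when the predicted probability saturates at 0 or 1:

$$
\mathcal{L} = -\frac{1}{N}\sum_{i=1}^{N}\Big[y_i \log(\hat{y}_i + \epsilon) + (1 - y_i)\log(1 - \hat{y}_i + \epsilon)\Big],
\qquad \hat{y}_i = \sigma(\mathbf{x}_i^\top \mathbf{w} + b)
$$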
epsilon = 1e-12
class LogisticRegression():
    def __init__(self):
        self.W = tf.Variable(shape=[2, 1], dtype=tf.float32, 
            initial_value=tf.random.uniform(shape=[2, 1], minval=-0.1, maxval=0.1))
        self.b = tf.Variable(shape=[1], dtype=tf.float32, initial_value=tf.zeros(shape=[1]))

        self.trainable_variables = [self.W, self.b]
    @tf.function
    def __call__(self, inp):
        logits = tf.matmul(inp, self.W) + self.b # shape(N, 1)
        pred = tf.nn.sigmoid(logits)
        return pred

@tf.function
def compute_loss(pred, label):
    label = tf.cast(label, dtype=tf.float32)
    pred = tf.squeeze(pred, axis=1)
    '''============================='''
    # input:  label shape (N,), pred shape (N,)
    # output: losses shape (N,), one loss per sample
    # todo Fill-in 1: implement the sigmoid cross-entropy loss (without using tf's built-in loss functions)
    '''============================='''
    # epsilon keeps log() finite when pred saturates at 0 or 1
    losses = -(label * tf.math.log(pred + epsilon)
               + (1. - label) * tf.math.log(1. - pred + epsilon))
    loss = tf.reduce_mean(losses)

    pred = tf.where(pred>0.5, tf.ones_like(pred), tf.zeros_like(pred))
    accuracy = tf.reduce_mean(tf.cast(tf.equal(label, pred), dtype=tf.float32))
    return loss, accuracy
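'''
Optional sanity check (not part of the exercise): the hand-written loss should
match tf.nn.sigmoid_cross_entropy_with_logits, which takes raw logits rather
than probabilities. The toy logits and labels below are made up for illustration.
'''
_logits = tf.constant([[2.0], [-1.0], [0.5]])
_labels = tf.constant([1.0, 0.0, 1.0])
_loss_manual, _ = compute_loss(tf.nn.sigmoid(_logits), _labels)
_loss_builtin = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=_labels,
                                            logits=tf.squeeze(_logits, axis=1)))
print(_loss_manual.numpy(), _loss_builtin.numpy())  # the two should agree closely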
@tf.function
def train_one_step(model, optimizer, x, y):
    with tf.GradientTape() as tape:
        pred = model(x)
        loss, accuracy = compute_loss(pred, y)

    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss, accuracy, model.W, model.b
'''
Instantiate a model and train it
'''
if __name__ == '__main__':
    model = LogisticRegression()
    opt = tf.keras.optimizers.SGD(learning_rate=0.01)
    x1, x2, y = list(zip(*data_set))
    x = list(zip(x1, x2))
    x = tf.cast(x, dtype=tf.float32)    # cast x from float64 to float32 here
    animation_fram = []

    for i in range(200):
        loss, accuracy, W_opt, b_opt = train_one_step(model, opt, x, y)
        animation_fram.append((W_opt.numpy()[0, 0], W_opt.numpy()[1, 0], b_opt.numpy(), loss.numpy()))
        if i%20 == 0:
            print(f'loss: {loss.numpy():.4}\t accuracy: {accuracy.numpy():.4}')
'''
Display the results; no code to fill in.
If this errors out, try installing ffmpeg.
'''

f, ax = plt.subplots(figsize=(6,4))
f.suptitle('Logistic Regression Example', fontsize=15)
plt.ylabel('Y')
plt.xlabel('X')
ax.set_xlim(0, 10)
ax.set_ylim(0, 10)

line_d, = ax.plot([], [], label='fit_line')
C1_dots, = ax.plot([], [], '+', c='b', label='actual_dots')
C2_dots, = ax.plot([], [], 'o', c='g' ,label='actual_dots')


frame_text = ax.text(0.02, 0.95,'',horizontalalignment='left',verticalalignment='top', transform=ax.transAxes)
# ax.legend()

def init():
    line_d.set_data([],[])
    C1_dots.set_data([],[])
    C2_dots.set_data([],[])
    return (line_d,) + (C1_dots,) + (C2_dots,)

def animate(i):
    xx = np.arange(10, step=0.1)
    a = animation_fram[i][0]  # W[0, 0]
    b = animation_fram[i][1]  # W[1, 0]
    c = animation_fram[i][2]  # bias
    # decision boundary a*x + b*y + c = 0, rewritten as y = -(a*x + c) / b
    yy = a/-b * xx + c/-b
    line_d.set_data(xx, yy)

    C1_dots.set_data(C1[:, 0], C1[:, 1])
    C2_dots.set_data(C2[:, 0], C2[:, 1])

    frame_text.set_text('Timestep = %.1d/%.1d\nLoss = %.3f' % (i, len(animation_fram), animation_fram[i][3]))

    return (line_d,) + (C1_dots,) + (C2_dots,)

anim = animation.FuncAnimation(f, animate, init_func=init,
                               frames=len(animation_fram), interval=30, blit=True)

HTML(anim.to_html5_video())

softmax_regression-exercise

'''
Generate the dataset. Just read through this part; no code to fill in.
'+' points are sampled from the Gaussian (X, Y) ~ N(3, 6, 1, 1, 0).
'o' points are sampled from the Gaussian (X, Y) ~ N(6, 3, 1, 1, 0).
'*' points are sampled from the Gaussian (X, Y) ~ N(7, 7, 1, 1, 0).
'''
import tensorflow as tf
import matplotlib.pyplot as plt

from matplotlib import animation, rc
from IPython.display import HTML
import matplotlib.cm as cm
import numpy as np
%matplotlib inline

dot_num = 100
x_p = np.random.normal(3., 1, dot_num)
y_p = np.random.normal(6., 1, dot_num)
y = np.ones(dot_num)
C1 = np.array([x_p, y_p, y]).T  # each row: (x, y, label=1)

x_n = np.random.normal(6., 1, dot_num)
y_n = np.random.normal(3., 1, dot_num)
y = np.zeros(dot_num)
C2 = np.array([x_n, y_n, y]).T  # each row: (x, y, label=0)

x_b = np.random.normal(7., 1, dot_num)
y_b = np.random.normal(7., 1, dot_num)
y = np.ones(dot_num)*2
C3 = np.array([x_b, y_b, y]).T  # each row: (x, y, label=2)

plt.scatter(C1[:, 0], C1[:, 1], c='b', marker='+')
plt.scatter(C2[:, 0], C2[:, 1], c='g', marker='o')
plt.scatter(C3[:, 0], C3[:, 1], c='r', marker='*')

data_set = np.concatenate((C1, C2, C3), axis=0)
np.random.shuffle(data_set)
'''
Build the model
Define the model class, the loss function, and the one-step gradient descent function.

Fill-in 1: create the model parameters in the __init__ constructor

Fill-in 2: implement the softmax cross-entropy loss (without using tf's built-in loss functions)
'''
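For reference, Fill-in 2 asks for the standard multi-class cross-entropy over one-hot labels, again with an epsilon inside the log for numerical stability:

$$
\mathcal{L} = -\frac{1}{N}\sum_{i=1}^{N}\sum_{k=1}^{3} y_{ik}\,\log(\hat{y}_{ik} + \epsilon),
\qquad \hat{\mathbf{y}}_i = \mathrm{softmax}(\mathbf{x}_i^\top W + \mathbf{b})
$$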
epsilon = 1e-12
class SoftmaxRegression():
    def __init__(self):
        '''============================='''
        # todo Fill-in 1: create the model parameters self.W, self.b (see logistic_regression-exercise for reference)
        '''============================='''
        self.W = tf.Variable(shape=[2, 3], dtype=tf.float32,
            initial_value=tf.random.uniform(shape=[2, 3], minval=-0.1, maxval=0.1))
        self.b = tf.Variable(shape=[3], dtype=tf.float32,
            initial_value=tf.zeros(shape=[3]))  # one bias per class, so shape [3] rather than [1]
        self.trainable_variables = [self.W, self.b]
    @tf.function
    def __call__(self, inp):
        logits = tf.matmul(inp, self.W) + self.b # shape(N, 3)
        pred = tf.nn.softmax(logits)
        return pred    

@tf.function
def compute_loss(pred, label):
    label = tf.one_hot(tf.cast(label, dtype=tf.int32), dtype=tf.float32, depth=3)

    '''============================='''
    # input:  label shape (N, 3), pred shape (N, 3)
    # output: losses shape (N,), one loss per sample
    # todo Fill-in 2: implement the softmax cross-entropy loss (without using tf's built-in loss functions)
    '''============================='''
    # vectorized cross-entropy instead of a per-sample Python loop;
    # epsilon keeps log() finite when a predicted probability reaches 0
    losses = -tf.reduce_sum(label * tf.math.log(pred + epsilon), axis=1)
    loss = tf.reduce_mean(losses)

    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(label,axis=1), tf.argmax(pred, axis=1)), dtype=tf.float32))
    return loss, accuracy
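'''
Optional sanity check (not part of the exercise): the hand-written loss should
match tf.nn.softmax_cross_entropy_with_logits, which takes raw logits rather
than probabilities. The toy logits and labels below are made up for illustration.
'''
_logits = tf.constant([[2.0, 0.5, -1.0], [0.1, 0.2, 0.3]])
_labels = tf.constant([0.0, 2.0])
_loss_manual, _ = compute_loss(tf.nn.softmax(_logits), _labels)
_loss_builtin = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(
        labels=tf.one_hot(tf.cast(_labels, tf.int32), depth=3), logits=_logits))
print(_loss_manual.numpy(), _loss_builtin.numpy())  # the two should agree closely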

@tf.function
def train_one_step(model, optimizer, x, y):
    with tf.GradientTape() as tape:
        pred = model(x)
        loss, accuracy = compute_loss(pred, y)

    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss, accuracy
'''
Instantiate a model and train it
'''
model = SoftmaxRegression()
opt = tf.keras.optimizers.SGD(learning_rate=0.01)
x1, x2, y = list(zip(*data_set))
x = list(zip(x1, x2))
x = tf.cast(x, dtype=tf.float32)    # cast x from float64 to float32
for i in range(1000):
    loss, accuracy = train_one_step(model, opt, x, y)
    if i%50==49:
        print(f'loss: {loss.numpy():.4}\t accuracy: {accuracy.numpy():.4}')
'''
Display the results; no code to fill in.
'''
plt.scatter(C1[:, 0], C1[:, 1], c='b', marker='+')
plt.scatter(C2[:, 0], C2[:, 1], c='g', marker='o')
plt.scatter(C3[:, 0], C3[:, 1], c='r', marker='*')

x = np.arange(0., 10., 0.1)
y = np.arange(0., 10., 0.1)

X, Y = np.meshgrid(x, y)
inp = np.array(list(zip(X.reshape(-1), Y.reshape(-1))), dtype=np.float32)
print(inp.shape)
Z = model(inp)
Z = np.argmax(Z, axis=1)   # predicted class for each grid point
Z = Z.reshape(X.shape)
plt.contour(X, Y, Z)       # draw the class boundaries as contour lines
plt.show()

Author: Qin Jiahe
Copyright: Unless otherwise stated, all posts on this blog are licensed under CC BY 4.0. Please credit Qin Jiahe when reposting!