Table of Contents
1. 3D visualization of the functions
   (1) x[0]^2+x[1]^2+x[1]^3+x[0]*x[1]
   (2) x^2/20 + y^2
2. Add the optimizers and plot the trajectories
   · Combined with the 3D animations, explain the pros and cons of each algorithm in your own words, from the angles of trajectory, speed, and so on
3. Reproduce the classic CS231n animation
· Summary and reflections
1. 3D visualization of the functions
(1)x[0]^2+x[1]^2+x[1]^3+x[0]*x[1]
The code is as follows:
- import torch
- import numpy as np
- import matplotlib.pyplot as plt
-
- class Op(object):
- def __init__(self):
- pass
-
- def __call__(self, inputs):
- return self.forward(inputs)
-
- # 输入:张量inputs
- # 输出:张量outputs
- def forward(self, inputs):
- # return outputs
- raise NotImplementedError
-
- # 输入:最终输出对outputs的梯度outputs_grads
- # 输出:最终输出对inputs的梯度inputs_grads
- def backward(self, outputs_grads):
- # return inputs_grads
- raise NotImplementedError
-
-
- class OptimizedFunction3D(Op):
- def __init__(self):
- super(OptimizedFunction3D, self).__init__()
- self.params = {'x': 0}
- self.grads = {'x': 0}
-
- def forward(self, x):
- self.params['x'] = x
- return x[0] ** 2 + x[1] ** 2 + x[1] ** 3 + x[0] * x[1]
-
- def backward(self):
- x = self.params['x']
- gradient1 = 2 * x[0] + x[1]
- gradient2 = 2 * x[1] + 3 * x[1] ** 2 + x[0]
- grad1 = torch.Tensor([gradient1])
- grad2 = torch.Tensor([gradient2])
- self.grads['x'] = torch.cat([grad1, grad2])
-
-
- x1 = np.arange(-3, 3, 0.1)
- x2 = np.arange(-3, 3, 0.1)
- x1, x2 = np.meshgrid(x1, x2)
- init_x = torch.Tensor(np.array([x1, x2]))
-
- model = OptimizedFunction3D()
-
- # 绘制 f_3d函数 的 三维图像
- fig = plt.figure()
- ax = plt.axes(projection='3d')
-
- X = init_x[0].numpy()
- Y = init_x[1].numpy()
- Z = model(init_x).numpy() # 改为 model(init_x).numpy() David 2022.12.4
- ax.plot_surface(X, Y, Z, cmap='rainbow')
-
- ax.set_xlabel('x1')
- ax.set_ylabel('x2')
- ax.set_zlabel('f(x1,x2)')
- plt.show()
Resulting figure:
Analysis of the surface:
Looking at the function itself: it contains quadratic terms, a cubic term, and a cross term. The quadratic and cubic terms produce an undulating surface, and the cross term tilts it, so the plot should be an irregular, rolling surface.
From the figure (and its colors), there is a bluish basin near the middle that dips downward: this is a local minimum. Because the cubic term x[1]^3 pulls the surface down without bound as x[1] decreases, that local minimum is not a global one, and between the basin and the downhill region the surface contains a saddle point.
What is a saddle point? A saddle point is a stationary point where the gradient is zero but which is neither a local minimum nor a local maximum: the surface curves upward along some directions and downward along others, so gradient-based optimizers can slow down or stall near it.
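To make this concrete, here is a minimal sketch (my own addition, not part of the assignment code) that checks the two stationary points of f(x[0], x[1]) = x[0]^2 + x[1]^2 + x[1]^3 + x[0]*x[1] using the hand-derived gradient and Hessian:
- import numpy as np
-
- def f(x0, x1):
-     return x0 ** 2 + x1 ** 2 + x1 ** 3 + x0 * x1
-
- def hessian(x1):
-     # Hessian of f: [[2, 1], [1, 2 + 6*x1]]
-     return np.array([[2.0, 1.0], [1.0, 2.0 + 6.0 * x1]])
-
- # Setting the gradient (2*x0 + x1, 2*x1 + 3*x1**2 + x0) to zero gives
- # exactly two stationary points: (0, 0) and (0.25, -0.5).
- for x0, x1 in [(0.0, 0.0), (0.25, -0.5)]:
-     eig = np.linalg.eigvalsh(hessian(x1))
-     kind = 'local minimum' if np.all(eig > 0) else 'saddle point'
-     print(f"({x0}, {x1}): f = {f(x0, x1):.4f}, Hessian eigenvalues = {eig} -> {kind}")
The origin comes out as the local minimum of the basin, and (0.25, -0.5) as the saddle point on the rim between the basin and the region where the cubic term pulls the surface down.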
(2) x^2/20 + y^2
Code:
- import torch
- import numpy as np
- import matplotlib.pyplot as plt
-
-
- class Op(object):
- def __init__(self):
- pass
-
- def __call__(self, inputs):
- return self.forward(inputs)
-
- # 输入:张量inputs
- # 输出:张量outputs
- def forward(self, inputs):
- # return outputs
- raise NotImplementedError
-
- # 输入:最终输出对outputs的梯度outputs_grads
- # 输出:最终输出对inputs的梯度inputs_grads
- def backward(self, outputs_grads):
- # return inputs_grads
- raise NotImplementedError
-
-
- class OptimizedFunction3D(Op):
- def __init__(self):
- super(OptimizedFunction3D, self).__init__()
- self.params = {'x': 0}
- self.grads = {'x': 0}
-
- def forward(self, x):
- self.params['x'] = x
- return x[0] ** 2 / 20 + x[1] ** 2
-
- def backward(self):
- x = self.params['x']
- # gradient of x[0] ** 2 / 20 + x[1] ** 2
- gradient1 = 2 * x[0] / 20
- gradient2 = 2 * x[1]
- grad1 = torch.Tensor([gradient1])
- grad2 = torch.Tensor([gradient2])
- self.grads['x'] = torch.cat([grad1, grad2])
-
-
- x1 = np.arange(-40, 40, 0.1)
- x2 = np.arange(-40, 40, 0.1)
- x1, x2 = np.meshgrid(x1, x2)
- init_x = torch.Tensor(np.array([x1, x2]))
-
- model = OptimizedFunction3D()
-
- # 绘制 f_3d函数 的 三维图像
- fig = plt.figure()
- ax = plt.axes(projection='3d')
-
- X = init_x[0].numpy()
- Y = init_x[1].numpy()
- Z = model(init_x).numpy() # 改为 model(init_x).numpy() David 2022.12.4
- ax.plot_surface(X, Y, Z, cmap='rainbow')
-
- ax.set_xlabel('x1')
- ax.set_ylabel('x2')
- ax.set_zlabel('f(x1,x2)')
- plt.show()
Resulting figure:
Analysis: this function was already analyzed in class, so I will not repeat all of it. At first glance it is similar to the function analyzed in the previous assignment (NNDL 作业12 优化算法2D可视化-CSDN博客): x^2/20 + y^2 is an elongated quadratic bowl. Its gradient is (x/10, 2y), so the curvature along y is twenty times the curvature along x; with a single step size, plain gradient descent oscillates back and forth across the narrow y direction while only creeping along x, as the small sketch below illustrates.
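A small numerical illustration (my own sketch, not from the assignment), using the same learning rate (0.95) and starting point (-7, 2) that the SGD run for this function uses in Section 2:
- # Plain gradient descent on f(x, y) = x**2 / 20 + y**2; gradient = (x / 10, 2 * y)
- x, y, lr = -7.0, 2.0, 0.95
- for step in range(5):
-     x, y = x - lr * (x / 10.0), y - lr * (2.0 * y)
-     print(f"step {step + 1}: x = {x:+.3f}, y = {y:+.3f}")
The y coordinate flips sign every step while shrinking slowly, and x only creeps toward zero, which is exactly the zig-zag pattern visible in the SGD trajectory later.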
△△ What if the 3D figure produced by PyCharm cannot be rotated?
Whether this gets solved really depends on the search engine. The first time I looked it up I only half understood the answers, and the results sent me off toward svn, so I ended up installing Homebrew and svn (and making a bowl of instant noodles in between) before learning that it only takes a change in the settings.
In the new PyCharm UI, open Preferences | Tools | Python Scientific and uncheck "Show plots in tool window".
Honestly it is easy to miss: I kept searching for "scientific mode" and never got pointed to this option, so I even suspected "Python Scientific" did not exist under Tools and switched back to the classic UI to look for it.
Fortunately, after opening the preferences for the n-th time, I clicked on Python Scientific almost by accident and finally solved the problem.
2. Add the optimizers and plot the trajectories
Plot the 3D optimization trajectories for x[0]^2+x[1]^2+x[1]^3+x[0]*x[1] and x^2/20+y^2 respectively.
(1)x[0]^2+x[1]^2+x[1]^3+x[0]*x[1]
The code is as follows:
- import torch
- import numpy as np
- import copy
- from matplotlib import pyplot as plt
- from matplotlib import animation
- from itertools import zip_longest
-
-
- class Op(object):
- def __init__(self):
- pass
-
- def __call__(self, inputs):
- return self.forward(inputs)
-
- # 输入:张量inputs
- # 输出:张量outputs
- def forward(self, inputs):
- # return outputs
- raise NotImplementedError
-
- # 输入:最终输出对outputs的梯度outputs_grads
- # 输出:最终输出对inputs的梯度inputs_grads
- def backward(self, outputs_grads):
- # return inputs_grads
- raise NotImplementedError
-
-
- class Optimizer(object): # 优化器基类
- def __init__(self, init_lr, model):
- """
- 优化器类初始化
- """
- # 初始化学习率,用于参数更新的计算
- self.init_lr = init_lr
- # 指定优化器需要优化的模型
- self.model = model
-
- def step(self):
- """
- 定义每次迭代如何更新参数
- """
- pass
-
-
- class SimpleBatchGD(Optimizer):
- def __init__(self, init_lr, model):
- super(SimpleBatchGD, self).__init__(init_lr=init_lr, model=model)
-
- def step(self):
- # 参数更新
- if isinstance(self.model.params, dict):
- for key in self.model.params.keys():
- self.model.params[key] = self.model.params[key] - self.init_lr * self.model.grads[key]
-
-
- class Adagrad(Optimizer):
- def __init__(self, init_lr, model, epsilon):
- """
- Adagrad 优化器初始化
- 输入:
- - init_lr: 初始学习率 - model:模型,model.params存储模型参数值 - epsilon:保持数值稳定性而设置的非常小的常数
- """
- super(Adagrad, self).__init__(init_lr=init_lr, model=model)
- self.G = {}
- for key in self.model.params.keys():
- self.G[key] = 0
- self.epsilon = epsilon
-
- def adagrad(self, x, gradient_x, G, init_lr):
- """
- adagrad算法更新参数,G为参数梯度平方的累计值。
- """
- G += gradient_x ** 2
- x -= init_lr / torch.sqrt(G + self.epsilon) * gradient_x
- return x, G
-
- def step(self):
- """
- 参数更新
- """
- for key in self.model.params.keys():
- self.model.params[key], self.G[key] = self.adagrad(self.model.params[key],
- self.model.grads[key],
- self.G[key],
- self.init_lr)
-
-
- class RMSprop(Optimizer):
- def __init__(self, init_lr, model, beta, epsilon):
- """
- RMSprop优化器初始化
- 输入:
- - init_lr:初始学习率
- - model:模型,model.params存储模型参数值
- - beta:衰减率
- - epsilon:保持数值稳定性而设置的常数
- """
- super(RMSprop, self).__init__(init_lr=init_lr, model=model)
- self.G = {}
- for key in self.model.params.keys():
- self.G[key] = 0
- self.beta = beta
- self.epsilon = epsilon
-
- def rmsprop(self, x, gradient_x, G, init_lr):
- """
- rmsprop算法更新参数,G为迭代梯度平方的加权移动平均
- """
- G = self.beta * G + (1 - self.beta) * gradient_x ** 2
- x -= init_lr / torch.sqrt(G + self.epsilon) * gradient_x
- return x, G
-
- def step(self):
- """参数更新"""
- for key in self.model.params.keys():
- self.model.params[key], self.G[key] = self.rmsprop(self.model.params[key],
- self.model.grads[key],
- self.G[key],
- self.init_lr)
-
-
- class Momentum(Optimizer):
- def __init__(self, init_lr, model, rho):
- """
- Momentum优化器初始化
- 输入:
- - init_lr:初始学习率
- - model:模型,model.params存储模型参数值
- - rho:动量因子
- """
- super(Momentum, self).__init__(init_lr=init_lr, model=model)
- self.delta_x = {}
- for key in self.model.params.keys():
- self.delta_x[key] = 0
- self.rho = rho
-
- def momentum(self, x, gradient_x, delta_x, init_lr):
- """
- momentum算法更新参数,delta_x为梯度的加权移动平均
- """
- delta_x = self.rho * delta_x - init_lr * gradient_x
- x += delta_x
- return x, delta_x
-
- def step(self):
- """参数更新"""
- for key in self.model.params.keys():
- self.model.params[key], self.delta_x[key] = self.momentum(self.model.params[key],
- self.model.grads[key],
- self.delta_x[key],
- self.init_lr)
-
-
- class Nesterov(Optimizer):
- def __init__(self, init_lr, model, rho):
- super(Nesterov, self).__init__(init_lr=init_lr, model=model)
- self.delta_x = {}
- for key in self.model.params.keys():
- self.delta_x[key] = 0
- self.rho = rho
-
- def nesterov(self, x, gradient_x, delta_x, init_lr):
- """
- Nesterov算法更新参数,delta_x为梯度的加权移动平均
- """
- delta_x_prev = delta_x
- delta_x = self.rho * delta_x - init_lr * gradient_x
- x += -self.rho * delta_x_prev + (1 + self.rho) * delta_x
- return x, delta_x
-
- def step(self):
- """参数更新"""
- for key in self.model.params.keys():
- self.model.params[key], self.delta_x[key] = self.nesterov(self.model.params[key],
- self.model.grads[key],
- self.delta_x[key],
- self.init_lr)
-
-
- class Adam(Optimizer):
- def __init__(self, init_lr, model, beta1, beta2, epsilon):
- """
- Adam优化器初始化
- 输入:
- - init_lr:初始学习率
- - model:模型,model.params存储模型参数值
- - beta1, beta2:移动平均的衰减率
- - epsilon:保持数值稳定性而设置的常数
- """
- super(Adam, self).__init__(init_lr=init_lr, model=model)
- self.beta1 = beta1
- self.beta2 = beta2
- self.epsilon = epsilon
- self.M, self.G = {}, {}
- for key in self.model.params.keys():
- self.M[key] = 0
- self.G[key] = 0
- self.t = 1
-
- def adam(self, x, gradient_x, G, M, t, init_lr):
- """
- adam算法更新参数
- 输入:
- - x:参数
- - G:梯度平方的加权移动平均
- - M:梯度的加权移动平均
- - t:迭代次数
- - init_lr:初始学习率
- """
- M = self.beta1 * M + (1 - self.beta1) * gradient_x
- G = self.beta2 * G + (1 - self.beta2) * gradient_x ** 2
- M_hat = M / (1 - self.beta1 ** t)
- G_hat = G / (1 - self.beta2 ** t)
- t += 1
- x -= init_lr / torch.sqrt(G_hat + self.epsilon) * M_hat
- return x, G, M, t
-
- def step(self):
- """参数更新"""
- for key in self.model.params.keys():
- self.model.params[key], self.G[key], self.M[key], self.t = self.adam(self.model.params[key],
- self.model.grads[key],
- self.G[key],
- self.M[key],
- self.t,
- self.init_lr)
-
-
- class OptimizedFunction3D(Op):
- def __init__(self):
- super(OptimizedFunction3D, self).__init__()
- self.params = {'x': 0}
- self.grads = {'x': 0}
-
- def forward(self, x):
- self.params['x'] = x
- return x[0] ** 2 + x[1] ** 2 + x[1] ** 3 + x[0] * x[1]
-
- def backward(self):
- x = self.params['x']
- gradient1 = 2 * x[0] + x[1]
- gradient2 = 2 * x[1] + 3 * x[1] ** 2 + x[0]
- grad1 = torch.Tensor([gradient1])
- grad2 = torch.Tensor([gradient2])
- self.grads['x'] = torch.cat([grad1, grad2])
-
-
- class Visualization3D(animation.FuncAnimation):
- """ 绘制动态图像,可视化参数更新轨迹 """
-
- def __init__(self, *xy_values, z_values, labels=[], colors=[], fig, ax, interval=600, blit=True, **kwargs):
- """
- 初始化3d可视化类
- 输入:
- xy_values:三维中x,y维度的值
- z_values:三维中z维度的值
- labels:每个参数更新轨迹的标签
- colors:每个轨迹的颜色
- interval:帧之间的延迟(以毫秒为单位)
- blit:是否优化绘图
- """
- self.fig = fig
- self.ax = ax
- self.xy_values = xy_values
- self.z_values = z_values
-
- frames = max(xy_value.shape[0] for xy_value in xy_values)
- self.lines = [ax.plot([], [], [], label=label, color=color, lw=2)[0]
- for _, label, color in zip_longest(xy_values, labels, colors)]
- super(Visualization3D, self).__init__(fig, self.animate, init_func=self.init_animation, frames=frames,
- interval=interval, blit=blit, **kwargs)
-
- def init_animation(self):
- # 数值初始化
- for line in self.lines:
- line.set_data([], [])
- line.set_3d_properties(np.asarray([])) # 源程序中有这一行,加上会报错。 Edit by David 2022.12.4
- return self.lines
-
- def animate(self, i):
- # 将x,y,z三个数据传入,绘制三维图像
- for line, xy_value, z_value in zip(self.lines, self.xy_values, self.z_values):
- line.set_data(xy_value[:i, 0], xy_value[:i, 1])
- line.set_3d_properties(z_value[:i])
- return self.lines
-
-
- def train_f(model, optimizer, x_init, epoch):
- x = x_init
- all_x = []
- losses = []
- for i in range(epoch):
- all_x.append(copy.deepcopy(x.numpy())) # 浅拷贝 改为 深拷贝, 否则List的原值会被改变。 Edit by David 2022.12.4.
- loss = model(x)
- losses.append(loss)
- model.backward()
- optimizer.step()
- x = model.params['x']
- return torch.Tensor(np.array(all_x)), losses
-
-
- # Build 6 models, each with a different optimizer
- model1 = OptimizedFunction3D()
- opt_gd = SimpleBatchGD(init_lr=0.01, model=model1)
-
- model2 = OptimizedFunction3D()
- opt_adagrad = Adagrad(init_lr=0.5, model=model2, epsilon=1e-7)
-
- model3 = OptimizedFunction3D()
- opt_rmsprop = RMSprop(init_lr=0.1, model=model3, beta=0.9, epsilon=1e-7)
-
- model4 = OptimizedFunction3D()
- opt_momentum = Momentum(init_lr=0.01, model=model4, rho=0.9)
-
- model5 = OptimizedFunction3D()
- opt_adam = Adam(init_lr=0.1, model=model5, beta1=0.9, beta2=0.99, epsilon=1e-7)
-
- model6 = OptimizedFunction3D()
- opt_nesterov = Nesterov(init_lr=0.01, model=model6, rho=0.9)
-
- models = [model1, model2, model3, model4, model5, model6]
- opts = [opt_gd, opt_adagrad, opt_rmsprop, opt_momentum, opt_adam, opt_nesterov]
-
- x_all_opts = []
- z_all_opts = []
-
- # 使用不同优化器训练
-
- for model, opt in zip(models, opts):
- x_init = torch.FloatTensor([2, 3])
- x_one_opt, z_one_opt = train_f(model, opt, x_init, 150) # epoch
- # 保存参数值
- x_all_opts.append(x_one_opt.numpy())
- z_all_opts.append(np.squeeze(z_one_opt))
-
- # 使用numpy.meshgrid生成x1,x2矩阵,矩阵的每一行为[-3, 3],以0.1为间隔的数值
- x1 = np.arange(-3, 3, 0.1)
- x2 = np.arange(-3, 3, 0.1)
- x1, x2 = np.meshgrid(x1, x2)
- init_x = torch.Tensor(np.array([x1, x2]))
-
- model = OptimizedFunction3D()
-
- # 绘制 f_3d函数 的 三维图像
- fig = plt.figure()
- ax = plt.axes(projection='3d')
-
- X = init_x[0].numpy()
- Y = init_x[1].numpy()
- Z = model(init_x).numpy() # 改为 model(init_x).numpy() David 2022.12.4
- ax.plot_surface(X, Y, Z, cmap='rainbow')
-
- ax.set_xlabel('x1')
- ax.set_ylabel('x2')
- ax.set_zlabel('f(x1,x2)')
- # 设置观察角度,固定在曲面上方
- ax.view_init(elev=30, azim=-60) # 调整这里的参数可以更改视角
-
- labels = ['SGD', 'AdaGrad', 'RMSprop', 'Momentum', 'Adam', 'Nesterov']
- colors = ['#3b818c', '#f6f237', '#45f637', '#fed71a', '#815c94', '#f97d1c']
-
- animator = Visualization3D(*x_all_opts, z_values=z_all_opts, labels=labels, colors=colors, fig=fig, ax=ax)
-
- ax.legend(loc='upper left')
- plt.show()
The resulting animation:
(2) x^2/20 + y^2
The code is as follows:
- import torch
- import numpy as np
- import copy
- from matplotlib import pyplot as plt
- from matplotlib import animation
- from itertools import zip_longest
- from matplotlib import cm
-
-
- class Op(object):
- def __init__(self):
- pass
-
- def __call__(self, inputs):
- return self.forward(inputs)
-
- # 输入:张量inputs
- # 输出:张量outputs
- def forward(self, inputs):
- # return outputs
- raise NotImplementedError
-
- # 输入:最终输出对outputs的梯度outputs_grads
- # 输出:最终输出对inputs的梯度inputs_grads
- def backward(self, outputs_grads):
- # return inputs_grads
- raise NotImplementedError
-
-
- class Optimizer(object): # 优化器基类
- def __init__(self, init_lr, model):
- """
- 优化器类初始化
- """
- # 初始化学习率,用于参数更新的计算
- self.init_lr = init_lr
- # 指定优化器需要优化的模型
- self.model = model
-
- def step(self):
- """
- 定义每次迭代如何更新参数
- """
- pass
-
-
- class SimpleBatchGD(Optimizer):
- def __init__(self, init_lr, model):
- super(SimpleBatchGD, self).__init__(init_lr=init_lr, model=model)
-
- def step(self):
- # 参数更新
- if isinstance(self.model.params, dict):
- for key in self.model.params.keys():
- self.model.params[key] = self.model.params[key] - self.init_lr * self.model.grads[key]
-
-
- class Adagrad(Optimizer):
- def __init__(self, init_lr, model, epsilon):
- """
- Adagrad 优化器初始化
- 输入:
- - init_lr: 初始学习率 - model:模型,model.params存储模型参数值 - epsilon:保持数值稳定性而设置的非常小的常数
- """
- super(Adagrad, self).__init__(init_lr=init_lr, model=model)
- self.G = {}
- for key in self.model.params.keys():
- self.G[key] = 0
- self.epsilon = epsilon
-
- def adagrad(self, x, gradient_x, G, init_lr):
- """
- adagrad算法更新参数,G为参数梯度平方的累计值。
- """
- G += gradient_x ** 2
- x -= init_lr / torch.sqrt(G + self.epsilon) * gradient_x
- return x, G
-
- def step(self):
- """
- 参数更新
- """
- for key in self.model.params.keys():
- self.model.params[key], self.G[key] = self.adagrad(self.model.params[key],
- self.model.grads[key],
- self.G[key],
- self.init_lr)
-
-
- class RMSprop(Optimizer):
- def __init__(self, init_lr, model, beta, epsilon):
- """
- RMSprop优化器初始化
- 输入:
- - init_lr:初始学习率
- - model:模型,model.params存储模型参数值
- - beta:衰减率
- - epsilon:保持数值稳定性而设置的常数
- """
- super(RMSprop, self).__init__(init_lr=init_lr, model=model)
- self.G = {}
- for key in self.model.params.keys():
- self.G[key] = 0
- self.beta = beta
- self.epsilon = epsilon
-
- def rmsprop(self, x, gradient_x, G, init_lr):
- """
- rmsprop算法更新参数,G为迭代梯度平方的加权移动平均
- """
- G = self.beta * G + (1 - self.beta) * gradient_x ** 2
- x -= init_lr / torch.sqrt(G + self.epsilon) * gradient_x
- return x, G
-
- def step(self):
- """参数更新"""
- for key in self.model.params.keys():
- self.model.params[key], self.G[key] = self.rmsprop(self.model.params[key],
- self.model.grads[key],
- self.G[key],
- self.init_lr)
-
-
- class Momentum(Optimizer):
- def __init__(self, init_lr, model, rho):
- """
- Momentum优化器初始化
- 输入:
- - init_lr:初始学习率
- - model:模型,model.params存储模型参数值
- - rho:动量因子
- """
- super(Momentum, self).__init__(init_lr=init_lr, model=model)
- self.delta_x = {}
- for key in self.model.params.keys():
- self.delta_x[key] = 0
- self.rho = rho
-
- def momentum(self, x, gradient_x, delta_x, init_lr):
- """
- momentum算法更新参数,delta_x为梯度的加权移动平均
- """
- delta_x = self.rho * delta_x - init_lr * gradient_x
- x += delta_x
- return x, delta_x
-
- def step(self):
- """参数更新"""
- for key in self.model.params.keys():
- self.model.params[key], self.delta_x[key] = self.momentum(self.model.params[key],
- self.model.grads[key],
- self.delta_x[key],
- self.init_lr)
-
-
- class Adam(Optimizer):
- def __init__(self, init_lr, model, beta1, beta2, epsilon):
- """
- Adam优化器初始化
- 输入:
- - init_lr:初始学习率
- - model:模型,model.params存储模型参数值
- - beta1, beta2:移动平均的衰减率
- - epsilon:保持数值稳定性而设置的常数
- """
- super(Adam, self).__init__(init_lr=init_lr, model=model)
- self.beta1 = beta1
- self.beta2 = beta2
- self.epsilon = epsilon
- self.M, self.G = {}, {}
- for key in self.model.params.keys():
- self.M[key] = 0
- self.G[key] = 0
- self.t = 1
-
- def adam(self, x, gradient_x, G, M, t, init_lr):
- """
- adam算法更新参数
- 输入:
- - x:参数
- - G:梯度平方的加权移动平均
- - M:梯度的加权移动平均
- - t:迭代次数
- - init_lr:初始学习率
- """
- M = self.beta1 * M + (1 - self.beta1) * gradient_x
- G = self.beta2 * G + (1 - self.beta2) * gradient_x ** 2
- M_hat = M / (1 - self.beta1 ** t)
- G_hat = G / (1 - self.beta2 ** t)
- t += 1
- x -= init_lr / torch.sqrt(G_hat + self.epsilon) * M_hat
- return x, G, M, t
-
- def step(self):
- """参数更新"""
- for key in self.model.params.keys():
- self.model.params[key], self.G[key], self.M[key], self.t = self.adam(self.model.params[key],
- self.model.grads[key],
- self.G[key],
- self.M[key],
- self.t,
- self.init_lr)
-
- class Nesterov(Optimizer):
- def __init__(self, init_lr, model, rho):
- super(Nesterov, self).__init__(init_lr=init_lr, model=model)
- self.delta_x = {}
- for key in self.model.params.keys():
- self.delta_x[key] = 0
- self.rho = rho
-
- def nesterov(self, x, gradient_x, delta_x, init_lr):
- """
- Nesterov算法更新参数,delta_x为梯度的加权移动平均
- """
- delta_x_prev = delta_x
- delta_x = self.rho * delta_x - init_lr * gradient_x
- x += -self.rho * delta_x_prev + (1 + self.rho) * delta_x
- return x, delta_x
-
- def step(self):
- """参数更新"""
- for key in self.model.params.keys():
- self.model.params[key], self.delta_x[key] = self.nesterov(self.model.params[key],
- self.model.grads[key],
- self.delta_x[key],
- self.init_lr)
-
-
- class OptimizedFunction3D(Op):
- def __init__(self):
- super(OptimizedFunction3D, self).__init__()
- self.params = {'x': 0}
- self.grads = {'x': 0}
-
- def forward(self, x):
- self.params['x'] = x
- return x[0] * x[0] / 20 + x[1] * x[1] / 1 # x[0] ** 2 + x[1] ** 2 + x[1] ** 3 + x[0] * x[1]
-
- def backward(self):
- x = self.params['x']
- gradient1 = 2 * x[0] / 20
- gradient2 = 2 * x[1] / 1
- grad1 = torch.Tensor([gradient1])
- grad2 = torch.Tensor([gradient2])
- self.grads['x'] = torch.cat([grad1, grad2])
-
-
- class Visualization3D(animation.FuncAnimation):
- """ 绘制动态图像,可视化参数更新轨迹 """
-
- def __init__(self, *xy_values, z_values, labels=[], colors=[], fig, ax, interval=100, blit=True, **kwargs):
- """
- 初始化3d可视化类
- 输入:
- xy_values:三维中x,y维度的值
- z_values:三维中z维度的值
- labels:每个参数更新轨迹的标签
- colors:每个轨迹的颜色
- interval:帧之间的延迟(以毫秒为单位)
- blit:是否优化绘图
- """
- self.fig = fig
- self.ax = ax
- self.xy_values = xy_values
- self.z_values = z_values
-
- frames = max(xy_value.shape[0] for xy_value in xy_values)
-
- self.lines = [ax.plot([], [], [], label=label, color=color, lw=2)[0]
- for _, label, color in zip_longest(xy_values, labels, colors)]
- self.points = [ax.plot([], [], [], color=color, markeredgewidth=1, markeredgecolor='black', marker='o')[0]
- for _, color in zip_longest(xy_values, colors)]
- # print(self.lines)
- super(Visualization3D, self).__init__(fig, self.animate, init_func=self.init_animation, frames=frames,
- interval=interval, blit=blit, **kwargs)
-
- def init_animation(self):
- # 数值初始化
- for line in self.lines:
- line.set_data_3d([], [], [])
- for point in self.points:
- point.set_data_3d([], [], [])
- return self.points + self.lines
-
- def animate(self, i):
- # 将x,y,z三个数据传入,绘制三维图像
- for line, xy_value, z_value in zip(self.lines, self.xy_values, self.z_values):
- line.set_data_3d(xy_value[:i, 0], xy_value[:i, 1], z_value[:i])
- for point, xy_value, z_value in zip(self.points, self.xy_values, self.z_values):
- point.set_data_3d(xy_value[i, 0], xy_value[i, 1], z_value[i])
- return self.points + self.lines
-
-
- def train_f(model, optimizer, x_init, epoch):
- x = x_init
- all_x = []
- losses = []
- for i in range(epoch):
- all_x.append(copy.deepcopy(x.numpy())) # 浅拷贝 改为 深拷贝, 否则List的原值会被改变。 Edit by David 2022.12.4.
- loss = model(x)
- losses.append(loss)
- model.backward()
- optimizer.step()
- x = model.params['x']
- return torch.Tensor(np.array(all_x)), losses
-
-
- # Build 6 models, each with a different optimizer
- model1 = OptimizedFunction3D()
- opt_gd = SimpleBatchGD(init_lr=0.95, model=model1)
-
- model2 = OptimizedFunction3D()
- opt_adagrad = Adagrad(init_lr=1.5, model=model2, epsilon=1e-7)
-
- model3 = OptimizedFunction3D()
- opt_rmsprop = RMSprop(init_lr=0.05, model=model3, beta=0.9, epsilon=1e-7)
-
- model4 = OptimizedFunction3D()
- opt_momentum = Momentum(init_lr=0.1, model=model4, rho=0.9)
-
- model5 = OptimizedFunction3D()
- opt_adam = Adam(init_lr=0.3, model=model5, beta1=0.9, beta2=0.99, epsilon=1e-7)
-
- model6 = OptimizedFunction3D()
- opt_nesterov = Nesterov(init_lr=0.01, model=model6, rho=0.9)
-
- models = [model1, model2, model3, model4, model5, model6]
- opts = [opt_gd, opt_adagrad, opt_rmsprop, opt_momentum, opt_adam, opt_nesterov]
-
- x_all_opts = []
- z_all_opts = []
-
- # 使用不同优化器训练
-
- for model, opt in zip(models, opts):
- x_init = torch.FloatTensor([-7, 2])
- x_one_opt, z_one_opt = train_f(model, opt, x_init, 100) # epoch
- # 保存参数值
- x_all_opts.append(x_one_opt.numpy())
- z_all_opts.append(np.squeeze(z_one_opt))
-
- # Use numpy.meshgrid to generate the x1, x2 grids: x1 ranges over [-10, 10] and x2 over [-5, 5], with step 0.01
- x1 = np.arange(-10, 10, 0.01)
- x2 = np.arange(-5, 5, 0.01)
- x1, x2 = np.meshgrid(x1, x2)
- init_x = torch.Tensor(np.array([x1, x2]))
-
- model = OptimizedFunction3D()
-
- # 绘制 f_3d函数 的 三维图像
- fig = plt.figure()
- ax = plt.axes(projection='3d')
- X = init_x[0].numpy()
- Y = init_x[1].numpy()
- Z = model(init_x).numpy() # 改为 model(init_x).numpy() David 2022.12.4
- surf = ax.plot_surface(X, Y, Z, edgecolor='grey', cmap=cm.coolwarm)
- # fig.colorbar(surf, shrink=0.5, aspect=1)
- # ax.set_zlim(-3, 2)
- ax.set_xlabel('x1')
- ax.set_ylabel('x2')
- ax.set_zlabel('f(x1,x2)')
-
- labels = ['SGD', 'AdaGrad', 'RMSprop', 'Momentum', 'Adam','Nesterov']
- colors = ['#3b818c', '#f6f237', '#45f637', '#fed71a', '#815c94', '#f97d1c']
-
- animator = Visualization3D(*x_all_opts, z_values=z_all_opts, labels=labels, colors=colors, fig=fig, ax=ax, interval=100)
- ax.legend(loc='upper right')
-
- plt.show()
- # animator.save('teaser' + '.gif', writer='imagemagick', fps=10)  # The result was not good; the trajectories were probably occluded. Needs further work. Edit by David 2022.12.4
- # save() did not work well, so instead of fighting with it I installed a separate tool to record the gif: https://pc.qq.com/detail/13/detail_23913.html
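If imagemagick is not available, matplotlib's built-in PillowWriter may be an alternative for saving the animation (an untested sketch, assuming the Pillow package is installed); it can be dropped in place of the commented-out save call above:
- # from matplotlib.animation import PillowWriter
- # animator.save('trajectories.gif', writer=PillowWriter(fps=10))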
The resulting animation:
· Combined with the 3D animations, explain the pros and cons of each algorithm in your own words, from the angles of trajectory, speed, and so on
I deliberately used the same colors for the algorithm trajectories in both figures, so the comparison is easy to follow:
SGD moves rather slowly in Figure 1, and in Figure 2, since it starts at the bottom of the narrow valley, it swings widely from side to side. Overall it is time-consuming, and it can linger near saddle points and fail to descend to a better minimum.
AdaGrad (dark yellow) and RMSprop (green) are both refinements built on top of SGD, so on the relatively simple surface they move in essentially the same direction. In Figure 1 the yellow line stops after advancing for a while, and the green line keeps going along the same path; in Figure 2 the green line is clearly slower than the yellow one.
The reason: AdaGrad divides the learning rate by the square root of the accumulated sum of squared gradients, and since that sum only grows, the effective step size shrinks toward zero and the trajectory eventually stalls. RMSprop replaces the sum with an exponentially decaying moving average, essentially a sliding window over recent gradients, so its effective step size does not decay to zero and it keeps moving; the difference in early speed seen in these figures also reflects the much larger initial learning rates given to AdaGrad in this experiment (0.5 and 1.5, versus 0.1 and 0.05 for RMSprop).
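This difference is easy to see numerically. The following sketch (my own illustration, reusing the update rules from the adagrad and rmsprop methods above) feeds a constant gradient of 1.0 into both accumulators and prints the effective step size init_lr / sqrt(G + epsilon):
- import numpy as np
-
- init_lr, beta, eps, g = 0.1, 0.9, 1e-7, 1.0
- G_ada, G_rms = 0.0, 0.0
- for t in range(1, 101):
-     G_ada += g ** 2                               # AdaGrad: the sum only grows
-     G_rms = beta * G_rms + (1 - beta) * g ** 2    # RMSprop: moving average, saturates near g**2
-     if t in (1, 10, 100):
-         print(f"t={t:3d}  AdaGrad step={init_lr / np.sqrt(G_ada + eps):.4f}  "
-               f"RMSprop step={init_lr / np.sqrt(G_rms + eps):.4f}")
AdaGrad's effective step keeps shrinking toward zero, which is why its trajectory stalls, while RMSprop's levels off around init_lr.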
Momentum (light yellow) and Nesterov (orange) are the classic momentum-based methods. In the first figure both shoot out of the local basin at high speed and head toward the lower region; in the second figure Nesterov oscillates with a visibly smaller amplitude than plain momentum.
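For reference, the two update rules implemented above differ only in how the velocity is applied to the parameters (a summary of the momentum and nesterov methods in the code, not new math):
- # Momentum (Momentum.momentum):
- #     delta_x = rho * delta_x - lr * grad
- #     x       = x + delta_x
- # Nesterov (Nesterov.nesterov): same velocity update, but the parameter takes the step
- #     x       = x - rho * delta_x_prev + (1 + rho) * delta_x
- # which corresponds to the usual look-ahead formulation (the gradient effectively acts
- # at roughly x + rho * delta_x), so overshoot is corrected a step earlier and the
- # oscillation is damped.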
3. Reproduce the classic CS231n animation
Code:
- import torch
- import numpy as np
- import copy
- from matplotlib import pyplot as plt
- from matplotlib import animation
- from itertools import zip_longest
- from matplotlib import cm
-
-
- class Op(object):
- def __init__(self):
- pass
-
- def __call__(self, inputs):
- return self.forward(inputs)
-
- # 输入:张量inputs
- # 输出:张量outputs
- def forward(self, inputs):
- # return outputs
- raise NotImplementedError
-
- # 输入:最终输出对outputs的梯度outputs_grads
- # 输出:最终输出对inputs的梯度inputs_grads
- def backward(self, outputs_grads):
- # return inputs_grads
- raise NotImplementedError
-
-
- class Optimizer(object): # 优化器基类
- def __init__(self, init_lr, model):
- """
- 优化器类初始化
- """
- # 初始化学习率,用于参数更新的计算
- self.init_lr = init_lr
- # 指定优化器需要优化的模型
- self.model = model
-
- def step(self):
- """
- 定义每次迭代如何更新参数
- """
- pass
-
-
- class SimpleBatchGD(Optimizer):
- def __init__(self, init_lr, model):
- super(SimpleBatchGD, self).__init__(init_lr=init_lr, model=model)
-
- def step(self):
- # 参数更新
- if isinstance(self.model.params, dict):
- for key in self.model.params.keys():
- self.model.params[key] = self.model.params[key] - self.init_lr * self.model.grads[key]
-
-
- class Adagrad(Optimizer):
- def __init__(self, init_lr, model, epsilon):
- """
- Adagrad 优化器初始化
- 输入:
- - init_lr: 初始学习率 - model:模型,model.params存储模型参数值 - epsilon:保持数值稳定性而设置的非常小的常数
- """
- super(Adagrad, self).__init__(init_lr=init_lr, model=model)
- self.G = {}
- for key in self.model.params.keys():
- self.G[key] = 0
- self.epsilon = epsilon
-
- def adagrad(self, x, gradient_x, G, init_lr):
- """
- adagrad算法更新参数,G为参数梯度平方的累计值。
- """
- G += gradient_x ** 2
- x -= init_lr / torch.sqrt(G + self.epsilon) * gradient_x
- return x, G
-
- def step(self):
- """
- 参数更新
- """
- for key in self.model.params.keys():
- self.model.params[key], self.G[key] = self.adagrad(self.model.params[key],
- self.model.grads[key],
- self.G[key],
- self.init_lr)
-
-
- class RMSprop(Optimizer):
- def __init__(self, init_lr, model, beta, epsilon):
- """
- RMSprop优化器初始化
- 输入:
- - init_lr:初始学习率
- - model:模型,model.params存储模型参数值
- - beta:衰减率
- - epsilon:保持数值稳定性而设置的常数
- """
- super(RMSprop, self).__init__(init_lr=init_lr, model=model)
- self.G = {}
- for key in self.model.params.keys():
- self.G[key] = 0
- self.beta = beta
- self.epsilon = epsilon
-
- def rmsprop(self, x, gradient_x, G, init_lr):
- """
- rmsprop算法更新参数,G为迭代梯度平方的加权移动平均
- """
- G = self.beta * G + (1 - self.beta) * gradient_x ** 2
- x -= init_lr / torch.sqrt(G + self.epsilon) * gradient_x
- return x, G
-
- def step(self):
- """参数更新"""
- for key in self.model.params.keys():
- self.model.params[key], self.G[key] = self.rmsprop(self.model.params[key],
- self.model.grads[key],
- self.G[key],
- self.init_lr)
-
-
- class Momentum(Optimizer):
- def __init__(self, init_lr, model, rho):
- """
- Momentum优化器初始化
- 输入:
- - init_lr:初始学习率
- - model:模型,model.params存储模型参数值
- - rho:动量因子
- """
- super(Momentum, self).__init__(init_lr=init_lr, model=model)
- self.delta_x = {}
- for key in self.model.params.keys():
- self.delta_x[key] = 0
- self.rho = rho
-
- def momentum(self, x, gradient_x, delta_x, init_lr):
- """
- momentum算法更新参数,delta_x为梯度的加权移动平均
- """
- delta_x = self.rho * delta_x - init_lr * gradient_x
- x += delta_x
- return x, delta_x
-
- def step(self):
- """参数更新"""
- for key in self.model.params.keys():
- self.model.params[key], self.delta_x[key] = self.momentum(self.model.params[key],
- self.model.grads[key],
- self.delta_x[key],
- self.init_lr)
-
-
- class Adam(Optimizer):
- def __init__(self, init_lr, model, beta1, beta2, epsilon):
- """
- Adam优化器初始化
- 输入:
- - init_lr:初始学习率
- - model:模型,model.params存储模型参数值
- - beta1, beta2:移动平均的衰减率
- - epsilon:保持数值稳定性而设置的常数
- """
- super(Adam, self).__init__(init_lr=init_lr, model=model)
- self.beta1 = beta1
- self.beta2 = beta2
- self.epsilon = epsilon
- self.M, self.G = {}, {}
- for key in self.model.params.keys():
- self.M[key] = 0
- self.G[key] = 0
- self.t = 1
-
- def adam(self, x, gradient_x, G, M, t, init_lr):
- """
- adam算法更新参数
- 输入:
- - x:参数
- - G:梯度平方的加权移动平均
- - M:梯度的加权移动平均
- - t:迭代次数
- - init_lr:初始学习率
- """
- M = self.beta1 * M + (1 - self.beta1) * gradient_x
- G = self.beta2 * G + (1 - self.beta2) * gradient_x ** 2
- M_hat = M / (1 - self.beta1 ** t)
- G_hat = G / (1 - self.beta2 ** t)
- t += 1
- x -= init_lr / torch.sqrt(G_hat + self.epsilon) * M_hat
- return x, G, M, t
-
- def step(self):
- """参数更新"""
- for key in self.model.params.keys():
- self.model.params[key], self.G[key], self.M[key], self.t = self.adam(self.model.params[key],
- self.model.grads[key],
- self.G[key],
- self.M[key],
- self.t,
- self.init_lr)
-
- class Nesterov(Optimizer):
- def __init__(self, init_lr, model, rho):
- super(Nesterov, self).__init__(init_lr=init_lr, model=model)
- self.delta_x = {}
- for key in self.model.params.keys():
- self.delta_x[key] = 0
- self.rho = rho
-
- def nesterov(self, x, gradient_x, delta_x, init_lr):
- """
- Nesterov算法更新参数,delta_x为梯度的加权移动平均
- """
- delta_x_prev = delta_x
- delta_x = self.rho * delta_x - init_lr * gradient_x
- x += -self.rho * delta_x_prev + (1 + self.rho) * delta_x
- return x, delta_x
-
- def step(self):
- """参数更新"""
- for key in self.model.params.keys():
- self.model.params[key], self.delta_x[key] = self.nesterov(self.model.params[key],
- self.model.grads[key],
- self.delta_x[key],
- self.init_lr)
-
-
- class OptimizedFunction3D(Op):
- def __init__(self):
- super(OptimizedFunction3D, self).__init__()
- self.params = {'x': 0}
- self.grads = {'x': 0}
-
- def forward(self, x):
- self.params['x'] = x
- return - x[0] * x[0] / 2 + x[1] * x[1] / 1 # x[0] ** 2 + x[1] ** 2 + x[1] ** 3 + x[0] * x[1]
-
- def backward(self):
- x = self.params['x']
- gradient1 = - 2 * x[0] / 2
- gradient2 = 2 * x[1] / 1
- grad1 = torch.Tensor([gradient1])
- grad2 = torch.Tensor([gradient2])
- self.grads['x'] = torch.cat([grad1, grad2])
-
-
- class Visualization3D(animation.FuncAnimation):
- """ 绘制动态图像,可视化参数更新轨迹 """
-
- def __init__(self, *xy_values, z_values, labels=[], colors=[], fig, ax, interval=100, blit=True, **kwargs):
- """
- 初始化3d可视化类
- 输入:
- xy_values:三维中x,y维度的值
- z_values:三维中z维度的值
- labels:每个参数更新轨迹的标签
- colors:每个轨迹的颜色
- interval:帧之间的延迟(以毫秒为单位)
- blit:是否优化绘图
- """
- self.fig = fig
- self.ax = ax
- self.xy_values = xy_values
- self.z_values = z_values
-
- frames = max(xy_value.shape[0] for xy_value in xy_values)
-
- self.lines = [ax.plot([], [], [], label=label, color=color, lw=2)[0]
- for _, label, color in zip_longest(xy_values, labels, colors)]
- self.points = [ax.plot([], [], [], color=color, markeredgewidth=1, markeredgecolor='black', marker='o')[0]
- for _, color in zip_longest(xy_values, colors)]
- # print(self.lines)
- super(Visualization3D, self).__init__(fig, self.animate, init_func=self.init_animation, frames=frames,
- interval=interval, blit=blit, **kwargs)
-
- def init_animation(self):
- # 数值初始化
- for line in self.lines:
- line.set_data_3d([], [], [])
- for point in self.points:
- point.set_data_3d([], [], [])
- return self.points + self.lines
-
- def animate(self, i):
- # 将x,y,z三个数据传入,绘制三维图像
- for line, xy_value, z_value in zip(self.lines, self.xy_values, self.z_values):
- line.set_data_3d(xy_value[:i, 0], xy_value[:i, 1], z_value[:i])
- for point, xy_value, z_value in zip(self.points, self.xy_values, self.z_values):
- point.set_data_3d(xy_value[i, 0], xy_value[i, 1], z_value[i])
- return self.points + self.lines
-
-
- def train_f(model, optimizer, x_init, epoch):
- x = x_init
- all_x = []
- losses = []
- for i in range(epoch):
- all_x.append(copy.deepcopy(x.numpy())) # 浅拷贝 改为 深拷贝, 否则List的原值会被改变。 Edit by David 2022.12.4.
- loss = model(x)
- losses.append(loss)
- model.backward()
- optimizer.step()
- x = model.params['x']
- return torch.Tensor(np.array(all_x)), losses
-
-
- # 构建6个模型,分别配备不同的优化器
- model1 = OptimizedFunction3D()
- opt_gd = SimpleBatchGD(init_lr=0.05, model=model1)
-
- model2 = OptimizedFunction3D()
- opt_adagrad = Adagrad(init_lr=0.05, model=model2, epsilon=1e-7)
-
- model3 = OptimizedFunction3D()
- opt_rmsprop = RMSprop(init_lr=0.05, model=model3, beta=0.9, epsilon=1e-7)
-
- model4 = OptimizedFunction3D()
- opt_momentum = Momentum(init_lr=0.05, model=model4, rho=0.9)
-
- model5 = OptimizedFunction3D()
- opt_adam = Adam(init_lr=0.05, model=model5, beta1=0.9, beta2=0.99, epsilon=1e-7)
-
- model6 = OptimizedFunction3D()
- opt_nesterov = Nesterov(init_lr=0.01, model=model6, rho=0.9)
-
-
- models = [model6, model5, model2, model3, model4, model1]
- opts = [opt_nesterov,opt_adam, opt_adagrad, opt_rmsprop, opt_momentum, opt_gd]
-
- x_all_opts = []
- z_all_opts = []
-
- # 使用不同优化器训练
-
- for model, opt in zip(models, opts):
- x_init = torch.FloatTensor([0.00001, 0.5])
- x_one_opt, z_one_opt = train_f(model, opt, x_init, 100) # epoch
- # 保存参数值
- x_all_opts.append(x_one_opt.numpy())
- z_all_opts.append(np.squeeze(z_one_opt))
-
- # Use numpy.meshgrid to generate the x1, x2 grids: x1 ranges over [-1, 2] with step 0.01, x2 over [-1, 1] with step 0.05
- x1 = np.arange(-1, 2, 0.01)
- x2 = np.arange(-1, 1, 0.05)
- x1, x2 = np.meshgrid(x1, x2)
- init_x = torch.Tensor(np.array([x1, x2]))
-
- model = OptimizedFunction3D()
-
- # 绘制 f_3d函数 的 三维图像
- fig = plt.figure()
- ax = plt.axes(projection='3d')
- X = init_x[0].numpy()
- Y = init_x[1].numpy()
- Z = model(init_x).numpy() # 改为 model(init_x).numpy() David 2022.12.4
- surf = ax.plot_surface(X, Y, Z, edgecolor='grey', cmap=cm.coolwarm)
- # fig.colorbar(surf, shrink=0.5, aspect=1)
- ax.set_zlim(-3, 2)
- ax.set_xlabel('x1')
- ax.set_ylabel('x2')
- ax.set_zlabel('f(x1,x2)')
-
- # Keep the same algorithm-to-color mapping as in the previous figures;
- # the labels must follow the order of the models list above (Nesterov, Adam, AdaGrad, RMSprop, Momentum, SGD)
- labels = ['Nesterov', 'Adam', 'AdaGrad', 'RMSprop', 'Momentum', 'SGD']
- colors = ['#f97d1c', '#815c94', '#f6f237', '#45f637', '#fed71a', '#3b818c']
-
- animator = Visualization3D(*x_all_opts, z_values=z_all_opts, labels=labels, colors=colors, fig=fig, ax=ax)
- ax.legend(loc='upper right')
-
- plt.show()
- # animator.save('teaser' + '.gif', writer='imagemagick', fps=10)  # The result was not good; the trajectories were probably occluded. Needs further work. Edit by David 2022.12.4
- # save() did not work well, so instead of fighting with it I installed a separate tool to record the gif: https://pc.qq.com/detail/13/detail_23913.html
The resulting animation:
Combined with the 3D animation, explain the pros and cons of each algorithm in your own words, from the angles of trajectory, speed, and so on
The colors in the figure above are the same as in the previous two figures, so the differences between the algorithms can be seen directly. I will not go over the pros and cons again, since they were already covered in the previous assignment (Assignment 12).
· Summary and reflections
Funnily enough, while writing Assignment 12 I figured that since I had covered all the concepts, I might as well also analyze the animated figure shown in class; only after finishing did I look at Assignment 13 and realize that explaining the animation was actually the task here.
That said, I think my Assignment 12 turned out quite well: the pros and cons of each algorithm and the analysis of the animation were all discussed there, with plenty of literature consulted, so there is not much left to add in Assignment 13.
I did make a few improvements this time, such as adding the Nesterov curve to the figures and using the same color for each algorithm's trajectory across all figures. I could probably go back and refine the first experiment based on the third one, but exam revision comes first, so I will leave it here.
As for the end-of-term summary, one roommate says only reflections are needed while another says a summary of the knowledge points is required, so I plan to write it (knowledge points plus reflections) closer to the deep learning exam, which will double as revision.