3.7 Concise Implementation of Softmax Regression

Published 2023-05-28 15:50:29 · Author: AncilunKiang
import torch
from torch import nn
from d2l import torch as d2l

batch_size = 256  # keep the batch size at 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)  # still use the Fashion-MNIST dataset

3.7.1 Initializing Model Parameters

net = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))  # Flatten reshapes each input image into a vector; then add a fully connected layer with 10 outputs

def init_weights(m):
    if type(m) == nn.Linear:
        nn.init.normal_(m.weight, std=0.01)  # initialize the weights with random values drawn from a normal distribution

net.apply(init_weights)
Sequential(
  (0): Flatten(start_dim=1, end_dim=-1)
  (1): Linear(in_features=784, out_features=10, bias=True)
)
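
As a quick sanity check (a hypothetical snippet, not part of the original post), we can push a dummy mini-batch through the network: nn.Flatten turns each 1×28×28 image into a 784-dimensional vector, and the linear layer maps it to 10 logits, one per class.

X = torch.rand(2, 1, 28, 28)  # a dummy batch of two Fashion-MNIST-sized images
print(net(X).shape)           # torch.Size([2, 10])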

3.7.2 Softmax Implementation Revisited

From a computational standpoint, the exponential can cause numerical stability problems, namely overflow. Overflow can be avoided by subtracting \(\max(o_k)\) from every \(o_k\) before the softmax. However, if some \(o_j-\max(o_k)\) is a large negative number, \(\exp(o_j-\max(o_k))\) may underflow to zero, and taking its logarithm would then give \(-\infty\). The remedy is to never compute \(\exp(o_j-\max(o_k))\) on its own: by combining the softmax with the cross-entropy we can work directly in log space,

\[
\begin{aligned}
\log(\hat{y}_j) &= \log\left(\frac{\exp(o_j-\max(o_k))}{\sum_k\exp(o_k-\max(o_k))}\right)\\
&= \log\left(\exp(o_j-\max(o_k))\right)-\log\left(\sum_k\exp(o_k-\max(o_k))\right)\\
&= o_j-\max(o_k)-\log\left(\sum_k\exp(o_k-\max(o_k))\right)
\end{aligned}
\]

loss = nn.CrossEntropyLoss(reduction='none')  # fuses log-softmax and negative log-likelihood; reduction='none' returns the per-example losses
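
nn.CrossEntropyLoss applies this LogSumExp trick internally. Purely as an illustration (a minimal sketch, not the library's implementation; the helper name stable_cross_entropy is made up here), the stable computation looks roughly like this:

def stable_cross_entropy(logits, labels):
    # subtract the row-wise maximum so that exp can never overflow
    shifted = logits - logits.max(dim=1, keepdim=True).values
    # at least one shifted logit is 0, so the sum is >= 1 and its log is finite;
    # terms that underflow to 0 inside the sum are harmless
    log_sum_exp = shifted.exp().sum(dim=1, keepdim=True).log()
    log_probs = shifted - log_sum_exp  # log-softmax, computed without ever taking log(exp(o_j)) on its own
    return -log_probs[torch.arange(len(labels)), labels]  # per-example cross-entropy

# sanity check against the library loss on large logits (should print True)
logits, labels = torch.randn(4, 10) * 100, torch.tensor([0, 1, 2, 3])
print(torch.allclose(stable_cross_entropy(logits, labels), loss(logits, labels)))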

3.7.3 Optimization Algorithm

trainer = torch.optim.SGD(net.parameters(), lr=0.1)  # use minibatch stochastic gradient descent with a learning rate of 0.1

3.7.4 Training

num_epochs = 10
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)  # call the training function defined in the previous section
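
For reference, a hedged sketch of what d2l.train_ch3 does each epoch (assuming the updater is a torch.optim.Optimizer; the real d2l function additionally animates the metrics and asserts on the final loss and accuracies):

for epoch in range(num_epochs):
    net.train()
    for X, y in train_iter:
        l = loss(net(X), y)   # per-example losses, because reduction='none'
        trainer.zero_grad()
        l.mean().backward()   # average before backpropagating
        trainer.step()
    net.eval()
    test_acc = d2l.evaluate_accuracy(net, test_iter)  # accuracy on the test set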


[Figure: training curves of train loss, train acc and test acc over 10 epochs]

Exercises

(1) Try adjusting the hyperparameters, such as the batch size, the number of epochs, and the learning rate, and see what the results are.

batch_size2 = 1024  # raise the batch size to 1024
train_iter2, test_iter2 = d2l.load_data_fashion_mnist(batch_size2)
net2 = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))
net2.apply(init_weights)

num_epochs = 10
trainer = torch.optim.SGD(net2.parameters(), lr=0.1)
d2l.train_ch3(net2, train_iter2, test_iter2, loss, num_epochs, trainer)  # raising the batch size increases the final train loss (fewer parameter updates per epoch)
---------------------------------------------------------------------------

AssertionError                            Traceback (most recent call last)

Cell In[6], line 8
      6 num_epochs = 10
      7 trainer = torch.optim.SGD(net2.parameters(), lr=0.1)
----> 8 d2l.train_ch3(net2, train_iter2, test_iter2, loss, num_epochs, trainer)


File c:\Software\Miniconda3\envs\d2l\lib\site-packages\d2l\torch.py:340, in train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
    338     animator.add(epoch + 1, train_metrics + (test_acc,))
    339 train_loss, train_acc = train_metrics
--> 340 assert train_loss < 0.5, train_loss
    341 assert train_acc <= 1 and train_acc > 0.7, train_acc
    342 assert test_acc <= 1 and test_acc > 0.7, test_acc


AssertionError: 0.5225925379435221

[Figure: training curves with batch_size = 1024]

batch_size3 = 256
train_iter3, test_iter3 = d2l.load_data_fashion_mnist(batch_size3)
net3 = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))
net3.apply(init_weights)

num_epochs = 40  # raise the number of epochs to 40
trainer = torch.optim.SGD(net3.parameters(), lr=0.1)
d2l.train_ch3(net3, train_iter3, test_iter3, loss, num_epochs, trainer)  # with more epochs the test acc eventually drops sharply, which looks like overfitting


[Figure: training curves over 40 epochs]

batch_size4 = 256
train_iter4, test_iter4 = d2l.load_data_fashion_mnist(batch_size4)
net4 = nn.Sequential(nn.Flatten(), nn.Linear(784, 10))
net4.apply(init_weights)

num_epochs = 10
trainer = torch.optim.SGD(net4.parameters(), lr=0.4)  # raise the learning rate to 0.4
d2l.train_ch3(net4, train_iter4, test_iter4, loss, num_epochs, trainer)  # with a higher learning rate the test acc is very unstable and the train loss fails to converge
---------------------------------------------------------------------------

AssertionError                            Traceback (most recent call last)

Cell In[9], line 8
      6 num_epochs = 10
      7 trainer = torch.optim.SGD(net4.parameters(), lr=0.4)  # raise the learning rate to 0.4
----> 8 d2l.train_ch3(net4, train_iter4, test_iter4, loss, num_epochs, trainer)


File c:\Software\Miniconda3\envs\d2l\lib\site-packages\d2l\torch.py:340, in train_ch3(net, train_iter, test_iter, loss, num_epochs, updater)
    338     animator.add(epoch + 1, train_metrics + (test_acc,))
    339 train_loss, train_acc = train_metrics
--> 340 assert train_loss < 0.5, train_loss
    341 assert train_acc <= 1 and train_acc > 0.7, train_acc
    342 assert test_acc <= 1 and test_acc > 0.7, test_acc


AssertionError: 0.6684705724080404

[Figure: training curves with lr = 0.4]


(2) Increase the number of epochs. Why might the test accuracy decrease after a while? How could we fix this?

Because the model starts to overfit the training data. Possible fixes include increasing the amount of training data, as well as regularization or early stopping.
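
For example (an illustrative setting only; weight decay is covered later in the book), L2 regularization can be added simply by passing weight_decay to the optimizer:

trainer_wd = torch.optim.SGD(net.parameters(), lr=0.1, weight_decay=1e-4)  # 1e-4 is an illustrative value, not tuned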