A Beginner's Guide to the Full PyTorch Workflow: Building, Training, and Testing Neural Networks

Published 2023-07-29 00:31:51 · Author: 倦鸟已归时

There are three basic ways to build a simple neural network:

from torch import nn

# Method 1: subclass nn.Module (the most flexible)
class Network(nn.Module):
    def __init__(self):
        super().__init__()
        
        # Inputs to hidden layer linear transformation
        self.hidden = nn.Linear(784, 256)
        # Output layer, 10 units - one for each digit
        self.output = nn.Linear(256, 10)
        
        # Define sigmoid activation and softmax output
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax(dim=1)
        
    def forward(self, x):
        # Pass the input tensor through each of our operations
        x = self.hidden(x)
        x = self.sigmoid(x)
        x = self.output(x)
        x = self.softmax(x)
        
        return x
nn1 = Network()
nn1
'''Output:
Network(
  (hidden): Linear(in_features=784, out_features=256, bias=True)
  (output): Linear(in_features=256, out_features=10, bias=True)
  (sigmoid): Sigmoid()
  (softmax): Softmax(dim=1)
)
'''
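
Since Network subclasses nn.Module, calling the instance runs forward(). Below is a minimal sketch of a forward pass, assuming a batch of flattened 28x28 images (which is what the 784-dimensional input suggests):

import torch

images = torch.randn(64, 784)  # hypothetical dummy batch of 64 samples
probs = nn1(images)            # invokes Network.forward under the hood
print(probs.shape)             # torch.Size([64, 10])
print(probs.sum(dim=1))        # each row sums to 1 because of Softmax(dim=1)
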
# Method 2: the nn.Sequential class
input_size = 784
hidden_size = [128, 64]
output_size = 10
nn2 = nn.Sequential(
    nn.Linear(input_size, hidden_size[0]),
    nn.ReLU(),
    nn.Linear(hidden_size[0], hidden_size[1]),
    nn.ReLU(),
    nn.Linear(hidden_size[1], output_size),
    nn.Softmax(dim=1)
)
nn2
'''Output:
Sequential(
  (0): Linear(in_features=784, out_features=128, bias=True)
  (1): ReLU()
  (2): Linear(in_features=128, out_features=64, bias=True)
  (3): ReLU()
  (4): Linear(in_features=64, out_features=10, bias=True)
  (5): Softmax(dim=1)
)
'''

# Method 3: also nn.Sequential, but built from an OrderedDict of named layers, which is easier to work with
from collections import OrderedDict
nn3 = nn.Sequential(OrderedDict([
    ('fc1', nn.Linear(input_size, hidden_size[0])),
    ('relu1', nn.ReLU()),
    ('fc2', nn.Linear(hidden_size[0], hidden_size[1])),
    ('relu2', nn.ReLU()),
    ('output', nn.Linear(hidden_size[1], output_size)),
    ('softmax', nn.Softmax(dim=1))
]))
nn3

'''Output:
Sequential(
  (fc1): Linear(in_features=784, out_features=128, bias=True)
  (relu1): ReLU()
  (fc2): Linear(in_features=128, out_features=64, bias=True)
  (relu2): ReLU()
  (output): Linear(in_features=64, out_features=10, bias=True)
  (softmax): Softmax(dim=1)
)
'''
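
nn2 and nn3 describe the same architecture; the OrderedDict variant only adds layer names. A quick sketch confirming that both accept the same input and produce the same output shape:

import torch

x_demo = torch.randn(8, 784)  # hypothetical batch of 8 samples
print(nn2(x_demo).shape)      # torch.Size([8, 10])
print(nn3(x_demo).shape)      # torch.Size([8, 10])
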

The corresponding ways to inspect the model structure and access individual layers:


print(nn1.hidden)   # attribute access on a subclassed nn.Module
print(nn2[2])       # integer indexing on a plain Sequential
print(nn3[4])       # a named Sequential still supports integer indexing...
print(nn3.output)   # ...as well as attribute access by layer name

'''
Linear(in_features=784, out_features=256, bias=True)
Linear(in_features=128, out_features=64, bias=True)
Linear(in_features=64, out_features=10, bias=True)
Linear(in_features=64, out_features=10, bias=True)
'''
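
Besides indexing individual layers, every nn.Module can enumerate its learnable tensors. A short sketch listing the parameters of nn3 by name:

for name, param in nn3.named_parameters():
    print(name, tuple(param.shape))
'''Output:
fc1.weight (128, 784)
fc1.bias (128,)
fc2.weight (64, 128)
fc2.bias (64,)
output.weight (10, 64)
output.bias (10,)
'''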

Next, the full workflow of training and testing a model.

Example 1: the simplest learning model, linear regression.

## a simple linear regression implementation
# https://blog.csdn.net/qq_27492735/article/details/89707150
import torch
from torch import nn, optim

# Normally we would load a training set here; instead we define a tiny
# dataset directly. (torch.autograd.Variable is deprecated; plain tensors
# carry autograd information since PyTorch 0.4.)
# Note that each target y is the sum of the two features in x.
x = torch.Tensor([[1, 2], [3, 4], [4, 2]])
y = torch.Tensor([[3], [7], [6]])
# model construction
def model():
    # the network
    net = nn.Sequential(
        nn.Linear(2, 4),
        nn.ReLU6(),
        nn.Linear(4, 3),
        nn.ReLU(),
        nn.Linear(3, 1)
    )
    # optimizer and loss function (MSE: mean squared error)
    optimizer = optim.Adam(net.parameters(), lr=0.01)
    loss_fun = nn.MSELoss()
    # training loop
    for i in range(300):
        # 1. forward pass
        out = net(x)
        # 2. compute the loss
        loss = loss_fun(out, y)
        print(loss)
        # 3. zero the gradients
        optimizer.zero_grad()
        # 4. backward pass
        loss.backward()
        # 5. optimizer step (update the parameters)
        optimizer.step()
    # predictions on the training data
    print(net(x))
    # save the trained model (parameters)
    # torch.save(net, 'simplelinreg.npy')
    return net
net = model()
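
Since each target here is just the sum of the two input features (3 = 1 + 2, 7 = 3 + 4, 6 = 4 + 2), we can sanity-check the trained net on an unseen point and then persist the parameters. A minimal sketch; the file name simplelinreg.pt is a hypothetical choice, and saving the state_dict is the commonly recommended alternative to serializing the whole module:

net.eval()                       # switch to evaluation mode
with torch.no_grad():            # no gradient tracking needed for inference
    pred = net(torch.Tensor([[5, 5]]))
    print(pred)                  # should be close to 10 if training converged

torch.save(net.state_dict(), 'simplelinreg.pt')  # hypothetical file name

# to reload: rebuild the same architecture, then restore the weights
net2 = nn.Sequential(
    nn.Linear(2, 4), nn.ReLU6(), nn.Linear(4, 3), nn.ReLU(), nn.Linear(3, 1)
)
net2.load_state_dict(torch.load('simplelinreg.pt'))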