Building a PyTorch Neural Network for Temperature Prediction

Published 2023-05-16 17:18:52 · Author: Frommoon

This article builds a neural network with PyTorch and uses it to predict daily temperatures.

(1) Read the data

import pandas as pd

features = pd.read_csv('temps.csv')  # 348 records in total, each with 9 features
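To get a feel for the data, a quick inspection can be run first (a minimal sketch; the column names listed in the comment are assumed from the columns used later in this article, and the file may contain others):

print(features.shape)   # expected: (348, 9)
print(features.head())  # columns such as year, month, day, week, temp_1, temp_2, actual, friend, ...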

(2) Preprocess the data

  • Process the date fields
# Convert the date fields into datetime objects for easier handling
import datetime

# Pull out the year, month, and day columns
years = features['year']
months = features['month']
days = features['day']

# Build datetime objects
dates = [str(int(year)) + '-' + str(int(month)) + '-' + str(int(day)) for year, month, day in zip(years, months, days)]
dates = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in dates]

Result of the date processing:
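As a quick check (not in the original post), the first few converted entries can be printed; they should be datetime.datetime objects in year-month-day order:

print(dates[:5])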

  • Visualize the data
# Prepare the plots
import matplotlib.pyplot as plt

# Use a default style
plt.style.use('fivethirtyeight')

# Lay out a 2x2 grid of subplots
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize = (10,10))
fig.autofmt_xdate(rotation = 45)

# Label values (the actual max temperature)
ax1.plot(dates, features['actual'])
ax1.set_xlabel(''); ax1.set_ylabel('Temperature'); ax1.set_title('Max Temp')

# Yesterday's max temperature
ax2.plot(dates, features['temp_1'])
ax2.set_xlabel(''); ax2.set_ylabel('Temperature'); ax2.set_title('Previous Max Temp')

# The max temperature from two days ago
ax3.plot(dates, features['temp_2'])
ax3.set_xlabel('Date'); ax3.set_ylabel('Temperature'); ax3.set_title('Two Days Prior Max Temp')

# A friend's (not very reliable) estimate
ax4.plot(dates, features['friend'])
ax4.set_xlabel('Date'); ax4.set_ylabel('Temperature'); ax4.set_title('Friend Estimate')

plt.tight_layout(pad=2)
  • Convert the strings in the week column to numbers (see the shape check after the code)
# One-hot encoding: convert the string values in the week column to numeric indicator columns
features = pd.get_dummies(features)

Result:
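A note on the shape change (assuming the week column holds the seven weekday names): get_dummies replaces the single string column with seven indicator columns, so the table grows from 9 to 15 columns; once the actual label is dropped in the next step, 14 feature columns remain, matching the (14, 128) weight matrix used later. One way to confirm:

print(features.shape)          # expected: (348, 15) after encoding
print(list(features.columns))  # the week_* indicator columns appear at the end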

  • Define the labels
# Labels (the values we want to predict)
import numpy as np

labels = np.array(features['actual'])

# Remove the label column from the features
features = features.drop('actual', axis = 1)

# Save the column names for later use
feature_list = list(features.columns)

# Convert to a NumPy array
features = np.array(features)
  • Standardize the data
from sklearn import preprocessing  # scikit-learn preprocessing module
input_features = preprocessing.StandardScaler().fit_transform(features)  # standardize so every feature varies over a comparable, small range

Result of standardization:
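As an optional sanity check (not in the original post), each standardized column should now have roughly zero mean and unit variance:

print(input_features.mean(axis=0).round(3))  # all values close to 0
print(input_features.std(axis=0).round(3))   # all values close to 1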

(3) Build the network model

  • First convert x and y to tensors and initialize the weight parameters; then run the forward pass, compute the loss from its output, backpropagate to obtain the gradients, and update the parameters.
import torch

x = torch.tensor(input_features, dtype = float)  # convert the input_features ndarray to a tensor

y = torch.tensor(labels, dtype = float).reshape(-1, 1)  # convert the labels ndarray to a tensor; reshape to a column so it matches the prediction shape

# Initialize the weight parameters
weights = torch.randn((14, 128), dtype = float, requires_grad = True)   # map the 14 input features to 128 hidden units
biases = torch.randn(128, dtype = float, requires_grad = True)          # one bias per hidden unit
weights2 = torch.randn((128, 1), dtype = float, requires_grad = True)   # regression task, so a single output value
biases2 = torch.randn(1, dtype = float, requires_grad = True)

learning_rate = 0.001  # learning rate
losses = []            # record of the loss at each iteration

for i in range(1000):
    # Hidden layer (.mm is matrix multiplication)
    hidden = x.mm(weights) + biases
    # Apply the activation function
    hidden = torch.relu(hidden)
    # Predictions
    predictions = hidden.mm(weights2) + biases2
    # Compute the loss (mean squared error)
    loss = torch.mean((predictions - y) ** 2)
    losses.append(loss.data.numpy())
    
    # Print the loss value
    if i % 100 == 0:
        print('loss:', loss)
    # Backpropagation
    loss.backward()
    
    # Update the parameters: step against the gradient, scaled by the learning rate
    weights.data.add_(- learning_rate * weights.grad.data)
    biases.data.add_(- learning_rate * biases.grad.data)
    weights2.data.add_(- learning_rate * weights2.grad.data)
    biases2.data.add_(- learning_rate * biases2.grad.data)
    
    # Remember to zero the gradients after every iteration, otherwise they accumulate and corrupt the updates
    weights.grad.data.zero_()
    biases.grad.data.zero_()
    weights2.grad.data.zero_()
    biases2.grad.data.zero_()

Printed loss values:
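Since the loop above stores every iteration's loss in losses, an optional addition (not in the original post) is to plot the curve and watch the loss decrease:

plt.plot(losses)
plt.xlabel('Iteration'); plt.ylabel('MSE loss'); plt.title('Training Loss (manual network)')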

(4) A simpler network model

input_size = input_features.shape[1]  # number of input features (14)
hidden_size = 128                     # number of hidden units
output_size = 1                       # a single output value
batch_size = 16                       # number of samples used per training step
my_nn = torch.nn.Sequential(          # stack the layers in order using the nn module
    torch.nn.Linear(input_size, hidden_size),   # fully connected layer: input_size -> hidden_size
    torch.nn.Sigmoid(),                          # activation function
    torch.nn.Linear(hidden_size, output_size),   # fully connected layer: hidden_size -> output_size
)
cost = torch.nn.MSELoss(reduction='mean')                    # mean squared error loss
optimizer = torch.optim.Adam(my_nn.parameters(), lr = 0.001)  # the Adam optimizer adapts the effective learning rate per parameter

# Train the network
losses = []
for i in range(1000):
    batch_loss = []
    # Mini-batch training
    for start in range(0, len(input_features), batch_size):
        end = start + batch_size if start + batch_size < len(input_features) else len(input_features)
        xx = torch.tensor(input_features[start:end], dtype = torch.float)                 # one batch of inputs, rows start..end
        yy = torch.tensor(labels[start:end], dtype = torch.float).reshape(-1, 1)          # matching batch of labels, reshaped to a column to match the prediction shape
        prediction = my_nn(xx)        # forward pass through the Sequential model defined above
        loss = cost(prediction, yy)   # loss
        optimizer.zero_grad()         # zero the gradients at every iteration
        loss.backward()               # backpropagation
        optimizer.step()              # update the parameters
        batch_loss.append(loss.data.numpy())
    
    # Print the loss
    if i % 100 == 0:
        losses.append(np.mean(batch_loss))
        print(i, np.mean(batch_loss))

Loss results:

(5) Prediction results

x = torch.tensor(input_features, dtype = torch.float)  # convert the format; here the original (full) data is used for prediction
predict = my_nn(x).data.numpy()  # one forward pass, converted to a NumPy array for plotting
# Rebuild the dates in datetime format
dates = [str(int(year)) + '-' + str(int(month)) + '-' + str(int(day)) for year, month, day in zip(years, months, days)]
dates = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in dates]

# Create a table holding the dates and the corresponding true label values
true_data = pd.DataFrame(data = {'date': dates, 'actual': labels})

# Likewise, create another table holding the dates and the corresponding model predictions
months = features[:, feature_list.index('month')]
days = features[:, feature_list.index('day')]
years = features[:, feature_list.index('year')]

test_dates = [str(int(year)) + '-' + str(int(month)) + '-' + str(int(day)) for year, month, day in zip(years, months, days)]

test_dates = [datetime.datetime.strptime(date, '%Y-%m-%d') for date in test_dates]

predictions_data = pd.DataFrame(data = {'date': test_dates, 'prediction': predict.reshape(-1)})  # .reshape(-1) flattens the predictions to one dimension
# True values
plt.plot(true_data['date'], true_data['actual'], 'b-', label = 'actual')

# Predicted values
plt.plot(predictions_data['date'], predictions_data['prediction'], 'ro', label = 'prediction')
plt.xticks(rotation = 'horizontal')
plt.legend()

# Axis labels and title
plt.xlabel('Date'); plt.ylabel('Maximum Temperature (F)'); plt.title('Actual and Predicted Values')
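To put a single number on the fit, the mean absolute error between the predictions and the true labels can also be reported (an optional addition; it is computed on the same data the model was trained on, so it is an optimistic estimate):

errors = abs(predict.reshape(-1) - labels)
print('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees F')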