CNN -- Simple Residual Network

Smiling & Weeping

 

                ---- I love you, from here all the way to the moon and back

Notes:

1. The problem to address: vanishing gradients.

2. Skip connection: H(x) = F(x) + x. The tensors F(x) and x must have identical dimensions, and the ReLU is applied after the addition. Do not put pooling inside the block, because it would change the tensor dimensions (see the shape check below).
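
Below is a minimal shape check (my own sketch, not from the original post) using the 1x16x12x12 shape that enters the first residual block of the network further down: two 3x3 convolutions with padding=1 and an unchanged channel count keep x and F(x) addable, while pooling would not.

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(1, 16, 12, 12)                      # (batch, channels, H, W)
conv = nn.Conv2d(16, 16, kernel_size=3, padding=1)  # same channel count; padding keeps H and W
y = conv(conv(x))
print(F.relu(x + y).shape)       # torch.Size([1, 16, 12, 12]) -- the addition is well defined
print(F.max_pool2d(y, 2).shape)  # torch.Size([1, 16, 6, 6])  -- could no longer be added to x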

 

# First a convolutional stage (conv, max pooling, relu), then a ResidualBlock,
# then another convolutional stage (conv, mp, relu), another ResidualBlock,
# and finally a fully connected layer

import torch
import torch.nn as nn
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np   # used by the Kaggle submission dataset below
import pandas as pd  # used by the Kaggle submission dataset below

batch_size = 64
# normalize with the MNIST mean and standard deviation
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])

train_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=True, batch_size=batch_size)

class ResidualBlock(nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.channels = channels
        # 3x3 convolutions with padding=1 keep H, W and the channel count unchanged,
        # so the block output can be added element-wise to its input
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        return F.relu(x + y)  # activate after the skip connection: H(x) = F(x) + x

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=5)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5)

        self.rblock1 = ResidualBlock(16)
        self.rblock2 = ResidualBlock(32)

        self.mp = nn.MaxPool2d(2)
        # 28x28 -> conv5 -> 24x24 -> pool -> 12x12 -> conv5 -> 8x8 -> pool -> 4x4,
        # so the flattened feature size is 32 * 4 * 4 = 512
        self.fc = nn.Linear(512, 10)

    def forward(self, x):
        in_size = x.size(0)

        x = self.mp(F.relu(self.conv1(x)))
        x = self.rblock1(x)
        x = self.mp(F.relu(self.conv2(x)))
        x = self.rblock2(x)

        x = x.view(in_size, -1)
        x = self.fc(x)
        return x

model = Net()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

# construct loss and optimizer
criterion = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

def train(epoch):
    running_loss = 0.0
    for batch_idx, data in enumerate(train_loader, 0):
        inputs, target = data
        inputs, target = inputs.to(device), target.to(device)
        optimizer.zero_grad()

        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if batch_idx % 300 == 299:
            # report the average loss over the last 300 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch+1, batch_idx+1, running_loss/300))
            running_loss = 0.0

def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(device), labels.to(device)

            outputs = model(images)
            _, prediction = torch.max(outputs.data, dim=1)
            total += labels.size(0)
            correct += (prediction == labels).sum().item()
    print('accuracy on test set: %d %%' % (100*correct/total))
    return correct/total

acc = []
epoch_list = []
for epoch in range(10):
    train(epoch)
    accuracy = test()
    epoch_list.append(epoch)
    acc.append(accuracy)

plt.plot(epoch_list, acc)
plt.ylabel('Accuracy')
plt.xlabel('epoch')
plt.show()

class DatasetSubmissionMNIST(torch.utils.data.Dataset):
    # Kaggle Digit Recognizer test.csv: one row per image, 784 pixel columns, no label
    def __init__(self, file_path, transform=None):
        self.data = pd.read_csv(file_path)
        self.transform = transform

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        image = self.data.iloc[index].values.astype(np.uint8).reshape((28, 28, 1))

        if self.transform is not None:
            image = self.transform(image)

        return image

transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.ToTensor(),
    # use the same normalization as the training data
    transforms.Normalize(mean=(0.1307,), std=(0.3081,))
])

submissionset = DatasetSubmissionMNIST('/kaggle/input/digit-recognizer/test.csv', transform=transform)
submissionloader = torch.utils.data.DataLoader(submissionset, batch_size=batch_size, shuffle=False)

submission = [['ImageId', 'Label']]

with torch.no_grad():
    model.eval()
    image_id = 1

    for images in submissionloader:
        images = images.to(device)
        outputs = model(images)  # raw logits; the arg-max gives the predicted digit
        top_p, top_class = outputs.topk(1, dim=1)

        for prediction in top_class:
            submission.append([image_id, prediction.item()])
            image_id += 1

print(len(submission) - 1)
import csv

with open('submission.csv', 'w', newline='') as submissionFile:
    writer = csv.writer(submissionFile)
    writer.writerows(submission)

print('Submission Complete!')
# submission.to_csv('/kaggle/working/submission.csv', index=False)
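
As a quick sanity check (my own addition, not part of the original post), the 512 passed to nn.Linear(512, 10) can be verified with a dummy forward pass: 28x28 -> conv5 -> 24x24 -> pool -> 12x12 -> conv5 -> 8x8 -> pool -> 4x4, and 32 * 4 * 4 = 512.

probe = Net()                                # fresh CPU instance, just for the shape probe
dummy = torch.zeros(1, 1, 28, 28)            # one fake MNIST image
feat = probe.mp(F.relu(probe.conv1(dummy)))
feat = probe.rblock1(feat)
feat = probe.mp(F.relu(probe.conv2(feat)))
feat = probe.rblock2(feat)
print(feat.shape)              # torch.Size([1, 32, 4, 4])
print(feat.view(1, -1).shape)  # torch.Size([1, 512]) -- matches nn.Linear(512, 10)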

With the residual blocks, the test accuracy is now quite high.

That's the end of this article; see you next time --<-<-<@

What torments you is never someone else's heartlessness,

  but the illusions and expectations in your own heart.

Give yourself back to yourself, and give others back to others;

  let flowers be flowers, and let trees be trees.