2023.12.14

Published: 2023-12-14 21:36:53 · Author: 太好了还有脑子可以用
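
A minimal end-to-end example: a small CNN with a modulated attention layer, trained for a few epochs on CIFAR-10, followed by a quick look at the attention maps.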
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
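
# The original post uses ModulatedAttLayer without defining it. The name and
# signature appear to match the modulated attention layer from the OLTR
# codebase (https://github.com/zhmiao/OpenLongTailRecognition-OLTR). Below is
# a minimal self-contained stand-in with the same interface, sketching only
# the embedded-Gaussian (non-local) attention path; it is a simplification,
# not the original layer, which also learns a spatial modulation term.
class ModulatedAttLayer(nn.Module):
    """Minimal stand-in: embedded-Gaussian (non-local) self-attention with a
    residual connection. Returns (output, attention_map) so callers can
    inspect the maps. Note the attention matrix is O((H*W)^2) in memory."""

    def __init__(self, in_channels, reduction=2, mode='embedded_gaussian'):
        super().__init__()
        assert mode == 'embedded_gaussian', 'only this mode is sketched here'
        inter = in_channels // reduction
        self.theta = nn.Conv2d(in_channels, inter, kernel_size=1)  # query
        self.phi = nn.Conv2d(in_channels, inter, kernel_size=1)    # key
        self.g = nn.Conv2d(in_channels, inter, kernel_size=1)      # value
        self.out = nn.Conv2d(inter, in_channels, kernel_size=1)    # back to C

    def forward(self, x):
        b, c, h, w = x.shape
        q = self.theta(x).flatten(2).transpose(1, 2)  # (B, HW, C')
        k = self.phi(x).flatten(2)                    # (B, C', HW)
        v = self.g(x).flatten(2).transpose(1, 2)      # (B, HW, C')
        att = torch.softmax(q @ k, dim=-1)            # (B, HW, HW)
        y = (att @ v).transpose(1, 2).reshape(b, -1, h, w)
        return self.out(y) + x, att                   # residual + maps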

# A simple CNN: one conv block, the attention layer, then a linear classifier
class SimpleCNN(nn.Module):
    def __init__(self):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU(inplace=True)
        self.mod_att = ModulatedAttLayer(in_channels=64, reduction=2, mode='embedded_gaussian')
        # CIFAR-10 images are 32x32 and conv1 preserves spatial size, so the
        # feature map is 64x32x32, not 64x7x7; pool down to 7x7 so the
        # linear layer's input size is actually correct.
        self.pool = nn.AdaptiveAvgPool2d((7, 7))
        self.fc = nn.Linear(64 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x, attention_maps = self.mod_att(x)
        x = self.pool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        # Return the attention maps alongside the logits so they can be
        # inspected after training (the test code at the bottom expects this).
        return x, attention_maps
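
# Quick shape sanity check (hypothetical, not in the original post): with the
# sketched layer above, attention runs on the 32x32 feature map, so the
# logits should be (2, 10) and the attention map (2, 1024, 1024).
_logits, _att = SimpleCNN()(torch.randn(2, 3, 32, 32))
print(_logits.shape, _att.shape)  # torch.Size([2, 10]) torch.Size([2, 1024, 1024])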

# CIFAR-10 training loader for demonstration (ToTensor only; normalization
# is omitted to keep the example short)
transform = transforms.Compose([transforms.ToTensor()])
train_loader = torch.utils.data.DataLoader(
    datasets.CIFAR10(root='./data', train=True, download=True, transform=transform),
    batch_size=32, shuffle=True, num_workers=4)

# Initialize the model and optimizer
model = SimpleCNN()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training loop
criterion = nn.CrossEntropyLoss()
model.train()
for epoch in range(5):
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output, _ = model(data)  # forward returns (logits, attention maps)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            print(f'Epoch: {epoch}, Batch: {batch_idx}, Loss: {loss.item():.4f}')

# Testing the attention mechanism on one batch (no gradients needed)
model.eval()
with torch.no_grad():
    test_data, _ = next(iter(train_loader))
    test_output, attention_maps = model(test_data)

# Visualize or analyze the attention maps as needed
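# One way to do that, assuming the sketched layer above (attention of shape
# (B, HW, HW)) and that matplotlib is installed: plot where one spatial
# location attends across the feature map.
import matplotlib.pyplot as plt

maps = attention_maps                  # (B, 1024, 1024) for 32x32 feature maps
h = w = int(maps.size(-1) ** 0.5)
center = (h // 2) * w + (w // 2)       # flat index of the central location
heat = maps[0, center].reshape(h, w)   # where sample 0's center pixel attends

plt.imshow(heat, cmap='viridis')
plt.title('Attention from the center location (sample 0)')
plt.colorbar()
plt.show()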