# Multilayer Perceptron

# Fetch the data
import d2lzh
from mxnet import autograd

batch_size = 256
train_data, test_data = d2lzh.load_data_fashion_mnist(batch_size)
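
# Quick sanity check (illustrative): the d2lzh loader typically yields batches of
# 256 grayscale 28x28 images plus a vector of class-index labels.
for data, label in train_data:
    print(data.shape, label.shape)  # (256, 1, 28, 28) (256,)
    break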

# Define and initialize model parameters
from mxnet import ndarray as nd

num_input = 28 * 28  # each Fashion-MNIST image is 28x28 pixels, flattened to 784
num_output = 10      # 10 clothing classes

num_hidden = 256     # one extra hidden layer of 256 units added in the middle
weight_scale = .01   # standard deviation for the random weight initialization

w1 = nd.random.normal(scale=weight_scale, shape=(num_input, num_hidden))
b1 = nd.zeros(num_hidden)

w2 = nd.random.normal(scale=weight_scale, shape=(num_hidden, num_output))
b2 = nd.zeros(num_output)

params = [w1, b1, w2, b2]

# Allocate gradient buffers so autograd can store a gradient for each parameter
for param in params:
    param.attach_grad()
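
# A minimal sketch of what attach_grad enables: operations run inside an
# autograd.record() scope are traced, and backward() fills each .grad buffer.
x = nd.array([1., 2., 3.])
x.attach_grad()
with autograd.record():
    y = (x * x).sum()
y.backward()
print(x.grad)  # [2. 4. 6.], i.e. dy/dx = 2x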


# Activation function

def relu(x):  # elementwise: returns x where x > 0, and 0 otherwise
    return nd.maximum(x, 0)
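
# Quick elementwise check of relu on a toy input: negatives are clamped to 0
print(relu(nd.array([[1., -2.], [-3., 4.]])))  # [[1. 0.] [0. 4.]]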


# Define the model

def net(x):
    x = x.reshape((-1, num_input))  # flatten each image into a 784-dim vector
    h1 = relu(nd.dot(x, w1) + b1)   # hidden layer with ReLU activation
    output = nd.dot(h1, w2) + b2    # output layer returns raw logits
    return output
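
# Forward-pass sanity check on two random fake "images": the reshape flattens
# (2, 1, 28, 28) into (2, 784), so the network emits one logit per class.
X = nd.random.normal(shape=(2, 1, 28, 28))
print(net(X).shape)  # (2, 10)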


# Softmax and cross-entropy loss

from mxnet import gluon

# Gluon fuses softmax and cross-entropy into one op for numerical stability
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
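
# Because the loss applies softmax internally, net() feeds it raw logits.
# Toy example: confident, correct logits yield small per-example losses.
logits = nd.array([[10., 1., 1.], [1., 10., 1.]])
labels = nd.array([0, 1])
print(softmax_cross_entropy(logits, labels))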


def accuracy(y_hat, y):
    # fraction of examples whose highest-scoring class matches the label
    return (y_hat.argmax(axis=1) == y.astype('float32')).mean().asscalar()
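
# Toy check: the first prediction matches its label, the second does not -> 0.5
y_hat = nd.array([[0.1, 0.9], [0.8, 0.2]])
y = nd.array([1, 1])
print(accuracy(y_hat, y))  # 0.5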


# Training

learning_rate = .5

for epoch in range(5):
    train_loss = 0.
    train_acc = 0.
    for data, label in train_data:
        with autograd.record():  # record the forward pass for autograd
            output = net(data)
            loss = softmax_cross_entropy(output, label)

        loss.backward()  # backpropagate through the recorded graph
        d2lzh.sgd(params, learning_rate, batch_size)  # SGD step, scaled by batch size

        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, label)

    # evaluate_accuracy already returns an average, so no further division is needed
    test_acc = d2lzh.evaluate_accuracy(test_data, net)
    print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
        epoch, train_loss / len(train_data), train_acc / len(train_data), test_acc))
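
# For comparison, a minimal sketch of the same two-layer MLP built with Gluon's
# high-level API (added here for illustration, not part of the original post;
# training would call trainer.step(batch_size) in place of d2lzh.sgd).
from mxnet import init
from mxnet.gluon import nn

gluon_net = nn.Sequential()
gluon_net.add(nn.Dense(num_hidden, activation='relu'),
              nn.Dense(num_output))
gluon_net.initialize(init.Normal(sigma=weight_scale))
trainer = gluon.Trainer(gluon_net.collect_params(),
                        'sgd', {'learning_rate': learning_rate})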