TensorFlow 02.2 Implementing Handwritten Digit Recognition


step 0 Loading and preprocessing the data

# Load MNIST: 60,000 training images (28x28 grayscale) plus a 10,000-image held-out split
(x, y), (x_val, y_val) = datasets.mnist.load_data()
# Scale pixel values to [0, 1] and convert the integer labels to one-hot vectors of depth 10
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
y = tf.convert_to_tensor(y, dtype=tf.int32)
y = tf.one_hot(y, depth=10)
print(x.shape, y.shape)
# Wrap the tensors in a tf.data pipeline and iterate in batches of 200
train_dataset = tf.data.Dataset.from_tensor_slices((x, y))
train_dataset = train_dataset.batch(200)
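As a quick sanity check (my own addition, assuming the snippet above has already run), you can take one batch from train_dataset and confirm the shapes are what the network expects:

# Inspect a single batch from the pipeline built above
for x_batch, y_batch in train_dataset.take(1):
    print(x_batch.shape)  # (200, 28, 28) -- images, still unflattened here
    print(y_batch.shape)  # (200, 10)     -- one-hot labels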

step 1 Computing out

where out = relu{relu[X@W1 + b1]@W2 + b2}@W3 + b3 (the final Dense(10) layer has no activation, so relu is applied only after the first two hidden layers).

# As mentioned earlier, the network is 784 -> 512 -> 256 -> 10, with relu as the activation
model = keras.Sequential([
    layers.Dense(512, activation='relu'),
    layers.Dense(256, activation='relu'),
    layers.Dense(10)])

# The SGD optimizer performs the gradient-descent update; we only need to set the learning rate (step size)
optimizer = optimizers.SGD(learning_rate=0.001)
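To see what keras.Sequential and Dense are doing for us, here is a minimal sketch of the same 784 -> 512 -> 256 -> 10 forward pass written with explicit variables. The names w1, b1, ..., w3, b3 and the truncated-normal initialization are my own choices for illustration, not what Keras uses by default:

import tensorflow as tf

# Explicit parameters for 784 -> 512 -> 256 -> 10 (illustrative names)
w1 = tf.Variable(tf.random.truncated_normal([784, 512], stddev=0.1))
b1 = tf.Variable(tf.zeros([512]))
w2 = tf.Variable(tf.random.truncated_normal([512, 256], stddev=0.1))
b2 = tf.Variable(tf.zeros([256]))
w3 = tf.Variable(tf.random.truncated_normal([256, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]))

def forward(x):
    # x: [b, 784]; matches out = relu(relu(x@w1+b1)@w2+b2)@w3+b3
    h1 = tf.nn.relu(x @ w1 + b1)
    h2 = tf.nn.relu(h1 @ w2 + b2)
    return h2 @ w3 + b3          # last layer: no activation

out = forward(tf.random.normal([200, 784]))
print(out.shape)                 # (200, 10)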

step 2 Computing out and the loss

with tf.GradientTape() as tape:
    # [b, 28, 28] => [b, 784]: flatten each image into a vector
    x = tf.reshape(x, (-1, 28*28))
    # Step1. compute output
    # [b, 784] => [b, 10]
    out = model(x)
    # Step2. compute loss: squared (Euclidean) error, averaged over the batch
    loss = tf.reduce_sum(tf.square(out - y)) / x.shape[0]
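To make the loss concrete, here is a tiny worked example (my own numbers, purely illustrative) of how tf.reduce_sum(tf.square(out - y)) / batch_size behaves on a batch of two samples:

import tensorflow as tf

# Two fake "predictions" and their one-hot targets (3 classes to keep it small)
out = tf.constant([[0.9, 0.1, 0.0],
                   [0.2, 0.7, 0.1]])
y   = tf.constant([[1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0]])

# Sum of squared differences over all entries, divided by the batch size (2)
loss = tf.reduce_sum(tf.square(out - y)) / out.shape[0]
print(loss.numpy())  # (0.01+0.01+0.0 + 0.04+0.09+0.01) / 2 = 0.08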

step 3 Computing gradients and updating the parameters

First we need to know this API: tape.gradient(loss, model.trainable_variables).
Here loss is the scalar loss value computed inside the GradientTape context above (it is a tensor, not a function object), and model.trainable_variables is the list of parameters to differentiate with respect to. For this model it contains the six tensors w1, b1, w2, b2, w3, b3 (Keras lists each Dense layer's kernel followed by its bias). The call returns the matching list of gradients [\({d(loss)\over d(w1)}\), \({d(loss)\over d(b1)}\), \({d(loss)\over d(w2)}\), \({d(loss)\over d(b2)}\), \({d(loss)\over d(w3)}\), \({d(loss)\over d(b3)}\)], one gradient per variable, in the same order.
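A minimal, self-contained sketch (my own toy example, not from the original) of how tape.gradient pairs each variable with its gradient:

import tensorflow as tf

w = tf.Variable(2.0)
b = tf.Variable(1.0)

with tf.GradientTape() as tape:
    # loss = (3w + b)^2 = 49 at the current values
    loss = tf.square(w * 3.0 + b)

# Returns one gradient per variable, in the same order as the list we pass in
dw, db = tape.gradient(loss, [w, b])
print(dw.numpy(), db.numpy())  # d(loss)/dw = 2*(3w+b)*3 = 42,  d(loss)/db = 2*(3w+b) = 14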

# Step3. optimize and update the weights and biases (w1, b1, ..., w3, b3)
# model.trainable_variables already holds these six parameters, so we pass it in directly
grads = tape.gradient(loss, model.trainable_variables)
# w' = w - lr * grad
optimizer.apply_gradients(zip(grads, model.trainable_variables))
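With plain SGD, apply_gradients is exactly the update w' = w - lr * grad applied to every parameter. A minimal sketch of the equivalent manual update (assuming grads was just computed as above; this is my own illustration, not the original code):

lr = 0.001
# Manual SGD step: subtract lr * gradient from each parameter in place
for grad, var in zip(grads, model.trainable_variables):
    var.assign_sub(lr * grad)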

step 4 Loop

def train_epoch(epoch):
    # Step4.loop
    for step, (x, y) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            # [b, 28, 28] => [b, 784]
            x = tf.reshape(x, (-1, 28*28))
            # Step1. compute output
            # [b, 784] => [b, 10]
            out = model(x)
            # Step2. compute loss
            loss = tf.reduce_sum(tf.square(out - y)) / x.shape[0]

        # Step3. optimize and update w1, w2, w3, b1, b2, b3
        grads = tape.gradient(loss, model.trainable_variables)
        # w' = w - lr * grad
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        if step % 100 == 0:
            print(epoch, step, 'loss:', loss.numpy())

The complete code

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers, datasets

(x, y), (x_val, y_val) = datasets.mnist.load_data() 
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.
y = tf.convert_to_tensor(y, dtype=tf.int32)
y = tf.one_hot(y, depth=10)
print(x.shape, y.shape)
train_dataset = tf.data.Dataset.from_tensor_slices((x, y))
train_dataset = train_dataset.batch(200)

model = keras.Sequential([ 
    layers.Dense(512, activation='relu'),
    layers.Dense(256, activation='relu'),
    layers.Dense(10)])

optimizer = optimizers.SGD(learning_rate=0.001)


def train_epoch(epoch):
    # Step4.loop
    for step, (x, y) in enumerate(train_dataset):
        with tf.GradientTape() as tape:
            # [b, 28, 28] => [b, 784]
            x = tf.reshape(x, (-1, 28*28))
            # Step1. compute output
            # [b, 784] => [b, 10]
            out = model(x)
            # Step2. compute loss
            loss = tf.reduce_sum(tf.square(out - y)) / x.shape[0]

        # Step3. optimize and update w1, w2, w3, b1, b2, b3
        grads = tape.gradient(loss, model.trainable_variables)
        # w' = w - lr * grad
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        if step % 100 == 0:
            print(epoch, step, 'loss:', loss.numpy())



def train():
    for epoch in range(30):
        train_epoch(epoch)


if __name__ == '__main__':
    train()
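The script loads (x_val, y_val) but never uses it. As a hedged extension (my own addition, not part of the original post), here is a minimal sketch of checking accuracy on that held-out split after training:

def evaluate():
    # Preprocess the validation split the same way as the training data
    xv = tf.convert_to_tensor(x_val, dtype=tf.float32) / 255.
    xv = tf.reshape(xv, (-1, 28*28))
    logits = model(xv)                        # [10000, 10]
    pred = tf.argmax(logits, axis=1)          # predicted digit per image
    correct = tf.cast(tf.equal(pred, tf.cast(y_val, tf.int64)), tf.float32)
    print('val acc:', tf.reduce_mean(correct).numpy())

Calling evaluate() after train() inside the __main__ block would print the held-out accuracy.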