TensorFlow 10.4 Convolutional Neural Networks: ResNet and DenseNet, with a ResNet Hands-On

Published 2023-06-24 23:22:49 | Author: 哎呦哎(iui)

1 ResNet

Experiments show that simply stacking more layers does not give better results: once the network becomes deep, the loss has to back-propagate through many stacked layers, and the gradients tend to explode or vanish. The idea behind ResNet is that a deeper network, say one with 30 layers, should at worst be no worse than a 22-layer one, so a shortcut (skip connection) is added around the extra layers.
Along this shortcut the gradient passes through unchanged, \(\partial E/\partial x = 1\), so if the 30-layer path turns out worse than the 22-layer one, the network can simply take the shortcut instead.
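A quick numeric check of this (a toy sketch with a made-up scalar branch \(F(x)=3x\), not the actual network): since a residual block computes \(H(x)=F(x)+x\), we get \(\partial H/\partial x = \partial F/\partial x + 1\), so the shortcut always contributes a direct gradient path of 1.

import tensorflow as tf

x = tf.constant(2.0)
with tf.GradientTape() as tape:
    tape.watch(x)   # track the scalar input
    f = 3.0 * x     # stand-in for the learned branch F(x)
    h = f + x       # residual output H(x) = F(x) + x

# dH/dx = dF/dx + 1 = 3 + 1
print(tape.gradient(h, x).numpy())  # 4.0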
A single residual unit stacks two 3×3 convolution layers and adds the input back onto their output through the shortcut.
We can also count the parameters of such a unit:
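As a concrete count (the 64-in/64-out channel sizes here are illustrative, not taken from the original figure): one 3×3 convolution mapping 64 channels to 64 channels has \(3 \cdot 3 \cdot 64 \cdot 64 + 64 = 36928\) parameters, and the two convolutions of a unit double that. Keras confirms the per-layer count:

import tensorflow as tf
from tensorflow.keras import layers

conv = layers.Conv2D(64, (3, 3), padding='same')
conv.build(input_shape=(None, 32, 32, 64))  # 64 input channels
print(conv.count_params())  # 3*3*64*64 + 64 = 36928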

This is what makes it feasible to stack many more layers and still train the network effectively.
Why call it Residual?
With the shortcut, the block outputs \(H(x) = F(x) + x\), so the stacked layers only need to learn the residual \(F(x) = H(x) - x\). This residual is what gives the network its name.

Implementation: see the ResNet hands-on in section 3 below.

2 DenseNet

In DenseNet, every layer has a chance to connect to each layer before it: instead of adding the shortcut as ResNet does, each layer receives the feature maps of all preceding layers, concatenated along the channel axis.
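A minimal sketch of one dense block (the layer count and growth rate are illustrative values, not from this post):

import tensorflow as tf
from tensorflow.keras import layers

def dense_block(x, num_layers=3, growth_rate=12):
    features = [x]
    for _ in range(num_layers):
        # each layer sees the concatenation of all earlier feature maps
        h = features[0] if len(features) == 1 else layers.Concatenate()(features)
        h = layers.BatchNormalization()(h)
        h = layers.Activation('relu')(h)
        h = layers.Conv2D(growth_rate, (3, 3), padding='same')(h)
        features.append(h)
    # the block output is again a concatenation, so channels keep growing
    return layers.Concatenate()(features)

inputs = tf.keras.Input(shape=(32, 32, 16))
model = tf.keras.Model(inputs, dense_block(inputs))
print(model.output_shape)  # (None, 32, 32, 52) = 16 + 3*12 channels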

3 ResNet Hands-On

First, let's look at a few APIs we will need:

keras.layers.GlobalAveragePooling2D(data_format=None)
data_format: the ordering of the input dimensions; the default (channels_last) corresponds to [batch, height, width, channel]
2D global average pooling: each channel is averaged over the spatial dimensions
The input tensor has shape [batch, height, width, channel]; the output has shape [batch, channel]
For example:

from tensorflow.keras.layers import GlobalAveragePooling2D
import tensorflow as tf
import numpy as np

# define a global average pooling layer
pool = GlobalAveragePooling2D()

# generate a random array of shape [64, 720, 720, 3]
x = np.random.random((64, 720, 720, 3))

# convert to a tensor; the first dimension (64) is the batch
# (Keras layers accept NumPy arrays directly, so this step is optional)
x = tf.convert_to_tensor(x)
print(x.shape) # (64, 720, 720, 3)

# apply global average pooling
y = pool(x)
print(y.shape) # (64, 3)
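Continuing the snippet above: global average pooling is simply a mean over the two spatial axes, so the same result can be computed with tf.reduce_mean:

# equivalent computation: average over height (axis 1) and width (axis 2)
y2 = tf.reduce_mean(x, axis=[1, 2])
print(np.allclose(y.numpy(), y2.numpy()))  # True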

The core of ResNet is the BasicBlock: two 3×3 convolution layers, each followed by batch normalization, plus an identity shortcut whose output is added to the convolutional branch.

A Res Block stacks several such BasicBlocks; only the first one may change the stride.

For ResNet-18, the configuration is [2, 2, 2, 2]: four stages with two BasicBlocks each, i.e. 8 BasicBlocks in total, and every BasicBlock contains two convolutions.
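Counting the weight layers gives the 18 in the name: 1 (stem conv) + 8 × 2 (convs in the BasicBlocks) + 1 (final dense layer) = 18.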

Here is the model (saved as resnet.py, since the training script below imports it):

import  tensorflow as tf
from    tensorflow import keras
from    tensorflow.keras import layers, Sequential



class BasicBlock(layers.Layer):

    def __init__(self, filter_num, stride=1):
        super(BasicBlock, self).__init__()

        self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same')
        self.bn1 = layers.BatchNormalization()
        self.relu = layers.Activation('relu')

        self.conv2 = layers.Conv2D(filter_num, (3, 3), strides=1, padding='same')
        self.bn2 = layers.BatchNormalization()

        if stride != 1:
            # when the block downsamples, a 1x1 conv brings the identity
            # branch to the same spatial size so the two paths can be added
            self.downsample = Sequential()
            self.downsample.add(layers.Conv2D(filter_num, (1, 1), strides=stride))
        else:
            self.downsample = lambda x: x



    def call(self, inputs, training=None):

        # [b, h, w, c]
        out = self.conv1(inputs)
        out = self.bn1(out,training=training)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out,training=training)

        identity = self.downsample(inputs)

        output = layers.add([out, identity])
        output = tf.nn.relu(output)

        return output


class ResNet(keras.Model):


    def __init__(self, layer_dims, num_classes=100): # [2, 2, 2, 2]
        super(ResNet, self).__init__()

        # stem: conv + BN + ReLU + max-pool (stride 1, so the small
        # CIFAR images are not downsampled here)
        self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)),
                                layers.BatchNormalization(),
                                layers.Activation('relu'),
                                layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
                                ])

        self.layer1 = self.build_resblock(64,  layer_dims[0])
        self.layer2 = self.build_resblock(128, layer_dims[1], stride=2)
        self.layer3 = self.build_resblock(256, layer_dims[2], stride=2)
        self.layer4 = self.build_resblock(512, layer_dims[3], stride=2)

        # after layer4: [b, h, w, 512]; global average pooling gives [b, 512]
        self.avgpool = layers.GlobalAveragePooling2D()
        self.fc = layers.Dense(num_classes)





    def call(self, inputs, training=None):

        x = self.stem(inputs,training=training)

        x = self.layer1(x,training=training)
        x = self.layer2(x,training=training)
        x = self.layer3(x,training=training)
        x = self.layer4(x,training=training)

        # [b, c]
        x = self.avgpool(x)
        # [b, 100]
        x = self.fc(x)

        return x



    def build_resblock(self, filter_num, blocks, stride=1):

        res_blocks = Sequential()
        # only the first block may downsample (stride > 1)
        res_blocks.add(BasicBlock(filter_num, stride))

        # the remaining blocks keep stride 1
        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, stride=1))

        return res_blocks


def resnet18():
    # [2, 2, 2, 2]: two BasicBlocks per stage, each with two convs,
    # so 16 convs in total; add the stem conv and the final dense layer = 18
    return ResNet([2, 2, 2, 2])


def resnet34():
    return ResNet([3, 4, 6, 3])
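A quick shape check (a sketch you could append to the end of resnet.py; the 32×32×3 input matches the CIFAR-100 images used in the training script below):

if __name__ == '__main__':
    # sanity check: build the network and run a dummy batch through it
    model = resnet18()
    model.build(input_shape=(None, 32, 32, 3))
    x = tf.random.normal([4, 32, 32, 3])
    print(model(x, training=False).shape)  # (4, 100): one logit per class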

Using the model:

import  os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'

import  tensorflow as tf
from    tensorflow.keras import layers, optimizers, datasets, Sequential 
from    resnet import resnet18 

tf.random.set_seed(2345)





def preprocess(x, y):
    # scale pixels to [-0.5, 0.5]
    x = tf.cast(x, dtype=tf.float32) / 255. - 0.5
    y = tf.cast(y, dtype=tf.int32)
    return x, y


(x,y), (x_test, y_test) = datasets.cifar100.load_data()
y = tf.squeeze(y, axis=1)  # labels come as [b, 1]; squeeze to [b]
y_test = tf.squeeze(y_test, axis=1)
print(x.shape, y.shape, x_test.shape, y_test.shape)


train_db = tf.data.Dataset.from_tensor_slices((x,y))
train_db = train_db.shuffle(1000).map(preprocess).batch(512)

test_db = tf.data.Dataset.from_tensor_slices((x_test,y_test))
test_db = test_db.map(preprocess).batch(512)

sample = next(iter(train_db))
print('sample:', sample[0].shape, sample[1].shape,
      tf.reduce_min(sample[0]), tf.reduce_max(sample[0]))


def main():

    # [b, 32, 32, 3] => [b, 1, 1, 512]
    model = resnet18()  # build the ResNet-18 model
    model.build(input_shape=(None, 32, 32, 3))
    model.summary()
    optimizer = optimizers.Adam(learning_rate=1e-3)  # 'lr' is a deprecated alias

    for epoch in range(500):

        for step, (x,y) in enumerate(train_db):

            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 100]
                logits = model(x,training=True)
                # [b] => [b, 100]
                y_onehot = tf.one_hot(y, depth=100)
                # compute loss
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                loss = tf.reduce_mean(loss)

            grads = tape.gradient(loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            if step % 50 == 0:
                print(epoch, step, 'loss:', float(loss))



        total_num = 0
        total_correct = 0
        for x,y in test_db:

            logits = model(x,training=False)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)

            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)

        acc = total_correct / total_num
        print(epoch, 'acc:', acc)



if __name__ == '__main__':
    main()