Code Examples for Training and Predicting Multiple Target Labels in Deep Learning

Published: 2023-10-04 14:53:11  Author: 不像话
# TensorFlow
# Suppose the task is to predict an object's position (x and y coordinates) and its class from an image. This task has three target labels: the x coordinate, the y coordinate, and the class.
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense

# Create simulated data
num_samples = 1000
num_classes = 10

# Feature data
X = np.random.rand(num_samples, 64)

# Target labels: x coordinate, y coordinate, class
y_x = np.random.rand(num_samples, 1)
y_y = np.random.rand(num_samples, 1)
y_class = np.random.randint(0, num_classes, size=(num_samples,))

# Split into training and test sets
split_ratio = 0.8
num_train_samples = int(num_samples * split_ratio)

X_train, X_test = X[:num_train_samples], X[num_train_samples:]
y_train_x, y_test_x = y_x[:num_train_samples], y_x[num_train_samples:]
y_train_y, y_test_y = y_y[:num_train_samples], y_y[num_train_samples:]
y_train_class, y_test_class = y_class[:num_train_samples], y_class[num_train_samples:]

# Create the input layer
input_layer = Input(shape=(64,))

# Create multiple output layers, one per target label
output_x = Dense(1, name='output_x')(input_layer)
output_y = Dense(1, name='output_y')(input_layer)
output_class = Dense(num_classes, activation='softmax', name='output_class')(input_layer)

# Build the multi-output model
model = keras.Model(inputs=input_layer, outputs=[output_x, output_y, output_class])

# Compile the model; a different loss function can be specified for each output
model.compile(optimizer='adam',
              loss={'output_x': 'mean_squared_error',
                    'output_y': 'mean_squared_error',
                    'output_class': 'sparse_categorical_crossentropy'},
              metrics={'output_x': 'mae', 'output_y': 'mae', 'output_class': 'accuracy'})

# Train the model
history = model.fit(X_train, {'output_x': y_train_x, 'output_y': y_train_y, 'output_class': y_train_class},
                    validation_split=0.2, epochs=10, batch_size=32)
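# Optional (an addition, not in the original post): history.history records the
# per-output losses and metrics for every epoch; the exact key names vary a bit
# across Keras versions, so it is safest to inspect them directly.
print(history.history.keys())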

# Run multi-output prediction with the model
y_pred_x, y_pred_y, y_pred_class = model.predict(X_test)

# y_pred_x contains the predictions for the x coordinate
# y_pred_y contains the predictions for the y coordinate
# y_pred_class contains the predicted class probabilities
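
# As an optional check (an addition, not in the original post), the trained model
# can also be evaluated on the held-out test set; for a multi-output model Keras
# reports the total loss plus the loss and metric of each named output.
eval_results = model.evaluate(
    X_test,
    {'output_x': y_test_x, 'output_y': y_test_y, 'output_class': y_test_class},
    verbose=0)
print(dict(zip(model.metrics_names, eval_results)))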

"""
In a multi-output model, each output layer can use a different loss function,
matched to the type of its target label.
"""
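
When the individual losses operate on very different scales (mean squared error for the coordinates versus cross-entropy for the class), Keras's compile step also accepts a loss_weights argument to balance their contributions to the total loss. A minimal sketch; the weight values below are purely illustrative, not from the original post:

model.compile(optimizer='adam',
              loss={'output_x': 'mean_squared_error',
                    'output_y': 'mean_squared_error',
                    'output_class': 'sparse_categorical_crossentropy'},
              loss_weights={'output_x': 1.0, 'output_y': 1.0, 'output_class': 0.5},
              metrics={'output_x': 'mae', 'output_y': 'mae', 'output_class': 'accuracy'})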
# PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import numpy as np

# Suppose you have training data and test data
X_train = np.random.rand(100, 10)  # 100 samples with 10 features each
y_train = np.random.rand(100, 5)   # 5 target labels per sample
X_test = np.random.rand(20, 10)
y_test = np.random.rand(20, 5)

# Convert the data to PyTorch tensors
X_train_tensor = torch.FloatTensor(X_train)
y_train_tensor = torch.FloatTensor(y_train)
X_test_tensor = torch.FloatTensor(X_test)
y_test_tensor = torch.FloatTensor(y_test)

# Define a multi-output neural network model
class MultiOutputModel(nn.Module):
    def __init__(self, input_size, output_size):
        super(MultiOutputModel, self).__init__()
        self.fc1 = nn.Linear(input_size, 64)    # shared hidden layer
        self.fc2 = nn.Linear(64, output_size)   # one linear output per target label

    def forward(self, x):
        x = torch.relu(self.fc1(x))  # hidden layer with ReLU activation
        x = self.fc2(x)              # predictions for all target labels at once
        return x

# Initialize the model
input_size = X_train.shape[1]   # number of input features
output_size = y_train.shape[1]  # number of target labels
model = MultiOutputModel(input_size, output_size)

# Define the loss function and optimizer
criterion = nn.MSELoss()  # mean squared error loss
optimizer = optim.Adam(model.parameters(), lr=0.001)  # Adam optimizer

# Wrap the data in a DataLoader
train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)

# Train the model
num_epochs = 100
for epoch in range(num_epochs):
    for batch_X, batch_y in train_loader:
        optimizer.zero_grad()
        outputs = model(batch_X)
        loss = criterion(outputs, batch_y)
        loss.backward()
        optimizer.step()
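    # Optional progress logging (an addition, not in the original post):
    # print the loss of the last batch every 10 epochs.
    if (epoch + 1) % 10 == 0:
        print(f"Epoch {epoch + 1}/{num_epochs}, loss: {loss.item():.4f}")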

# Make predictions on the test set
model.eval()  # inference mode; not strictly required here (no dropout/batchnorm), but good practice
with torch.no_grad():
    test_outputs = model(X_test_tensor)
    test_loss = criterion(test_outputs, y_test_tensor)
    print(f"Test Loss: {test_loss.item()}")

# Print the predicted outputs
print("Predicted Outputs:")
print(test_outputs.numpy())
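
The PyTorch model above uses a single head and one MSE loss for all five targets. To mirror the TensorFlow example, where the coordinate outputs and the class output use different loss functions, the network can instead expose separate heads and simply sum the per-head losses. The sketch below reuses the imports already in this script; MultiHeadModel and all hyperparameters are illustrative choices, not part of the original post.

# A multi-head variant: two regression heads (x, y) and one classification head
class MultiHeadModel(nn.Module):
    def __init__(self, input_size, num_classes):
        super().__init__()
        self.backbone = nn.Sequential(nn.Linear(input_size, 64), nn.ReLU())
        self.head_x = nn.Linear(64, 1)                # x coordinate (regression)
        self.head_y = nn.Linear(64, 1)                # y coordinate (regression)
        self.head_class = nn.Linear(64, num_classes)  # class logits (no softmax; CrossEntropyLoss expects logits)

    def forward(self, x):
        h = self.backbone(x)
        return self.head_x(h), self.head_y(h), self.head_class(h)

# Simulated data matching the TensorFlow example above
X_mh = torch.rand(1000, 64)
y_mh_x = torch.rand(1000, 1)
y_mh_y = torch.rand(1000, 1)
y_mh_class = torch.randint(0, 10, (1000,))

multi_head = MultiHeadModel(input_size=64, num_classes=10)
mse_loss = nn.MSELoss()
ce_loss = nn.CrossEntropyLoss()
mh_optimizer = optim.Adam(multi_head.parameters(), lr=0.001)

for epoch in range(10):
    mh_optimizer.zero_grad()
    pred_x, pred_y, pred_class = multi_head(X_mh)
    # The total loss is a (possibly weighted) sum of the per-head losses
    loss = mse_loss(pred_x, y_mh_x) + mse_loss(pred_y, y_mh_y) + ce_loss(pred_class, y_mh_class)
    loss.backward()
    mh_optimizer.step()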