HuggingFace | Training a 牛客网 (Niuke) Discussion Post Text Classifier with RoBERTa

Published 2023-07-30 18:37:58, Author: 张Zong在修行

Training an NLU model

This article uses the Hugging Face Trainer to train a text classifier for 牛客网 discussion posts. The full procedure follows.

Building the dataset

Dataset download links:

train data
test data

These two datasets are enough for a normal training demo; if you need a very accurate model, you can additionally use the large pseudo-labeled dataset: generated pesudo data

The dataset is structured as follows:

Each example contains a piece of text and a label; the label is one of three classes: 招聘信息 (job posting), 经验贴 (experience post), or 求助贴 (help request). An illustrative sample is sketched below.
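For reference, here is a hypothetical illustration of what rows in train.csv might look like (the column names text and target match what the preprocessing code reads; the example rows themselves are invented):

text,target
秋招求内推,帮忙看看我的简历有什么问题,求助贴
【校招内推】某大厂2023届后端开发工程师,招聘信息
分享一下我的秋招算法岗面经,经验贴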

We load the dataset and tokenize the text into ids:

from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "uer/chinese_roberta_L-4_H-512"

max_input_length = 128
label2id = {
    '招聘信息':0,
    '经验贴':1,
    '求助贴':2
}
id2label = {v:k for k,v in label2id.items()}

tokenizer = AutoTokenizer.from_pretrained(model_name)

def preprocess_function(examples):
    # Tokenize the text (truncated to max_input_length) and map the string
    # labels in the 'target' column to integer ids.
    model_inputs = tokenizer(examples["text"], max_length=max_input_length, truncation=True)
    model_inputs["labels"] = [label2id[x] for x in examples['target']]
    return model_inputs


raw_datasets = load_dataset('csv', data_files={'train': 'train.csv', 'test': 'test.csv'})
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True, remove_columns=raw_datasets['train'].column_names)
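To sanity-check the preprocessing, it helps to decode one tokenized example back into text (a minimal sketch; the exact ids and fields depend on the tokenizer):

sample = tokenized_datasets["train"][0]
print(sample["labels"])                       # an integer in {0, 1, 2}
print(tokenizer.decode(sample["input_ids"]))  # "[CLS] ... [SEP]" around the original text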

Defining the metric function

The metric function measures model performance during evaluation; here we use F1 score and accuracy.

import numpy as np
from sklearn.metrics import f1_score, accuracy_score, classification_report
from transformers import EvalPrediction

def multi_label_metrics(predictions, labels):
    # Despite the name, this is standard multi-class evaluation: take the
    # argmax over the per-class logits as the predicted class id.
    preds = np.argmax(predictions, -1)
    y_true = labels
    f1_micro_average = f1_score(y_true=y_true, y_pred=preds, average='micro')
    accuracy = accuracy_score(y_true, preds)
    # Print a per-class report with readable label names.
    print(classification_report([id2label[x] for x in y_true], [id2label[x] for x in preds]))
    return {'f1': f1_micro_average, 'accuracy': accuracy}
 
def compute_metrics(p: EvalPrediction):
    preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
    result = multi_label_metrics(predictions=preds, labels=p.label_ids)
    return result
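Before wiring compute_metrics into the Trainer, you can sanity-check it on hand-made logits (a small self-contained check; the fake logits below are invented for illustration):

# Three fake examples with confident, correct predictions -> perfect scores expected.
dummy = EvalPrediction(
    predictions=np.array([[2.0, 0.1, 0.1],
                          [0.1, 2.0, 0.1],
                          [0.1, 0.1, 2.0]]),
    label_ids=np.array([0, 1, 2]),
)
print(compute_metrics(dummy))  # {'f1': 1.0, 'accuracy': 1.0}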

Specifying the training arguments

Load the model and build a TrainingArguments object to specify the training hyperparameters.

The first argument, the output directory, is required; everything else is optional. Note that load_best_model_at_end requires evaluation_strategy and save_strategy to match, which is why both are set to "epoch" below.

from transformers import TrainingArguments, Trainer

model = AutoModelForSequenceClassification.from_pretrained(model_name,
                                        # problem_type="multi_label_classification",  # not needed: single-label, multi-class
                                        num_labels=3,
                                        # Passing id2label/label2id stores the mapping in the model
                                        # config, so the saved model reports readable label names.
                                        # id2label=id2label,
                                        # label2id=label2id
                                        )
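# Optional sanity check: the classification head should have 3 outputs.
# (Loading a base checkpoint into a sequence-classification model warns about
# newly initialized classifier weights; that is expected here.)
print(model.config.num_labels)  # 3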

batch_size = 64
metric_name = "f1"

training_args = TrainingArguments(
    output_dir="./out",
    evaluation_strategy="epoch",
    save_strategy="epoch",
    learning_rate=2e-4,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    # gradient_accumulation_steps=2,
    num_train_epochs=10,
    save_total_limit=1,              # keep only the most recent checkpoint on disk
    weight_decay=0.01,
    load_best_model_at_end=True,     # reload the best checkpoint when training ends
    metric_for_best_model=metric_name,
    fp16=True,                       # mixed-precision training; requires a CUDA GPU
)

Defining the Trainer and training

trainer = Trainer(
    model,
    training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics
)

trainer.train()  # start training
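If training is interrupted, the Trainer can pick up from the most recent checkpoint saved under output_dir:

trainer.train(resume_from_checkpoint=True)  # resume from the latest checkpoint in output_dir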

Evaluation and prediction

print("test")
print(trainer.evaluate())  # 测试
trainer.save_model("bert")  #保存模型

# Run prediction on the test set and print the results for inspection
predictions, labels, _ = trainer.predict(tokenized_datasets["test"])
predictions = np.argmax(predictions, axis=-1)
print(predictions)
print(labels)
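The raw predictions are class ids; mapping them through id2label makes spot-checking easier (the values shown in the comment are illustrative):

pred_labels = [id2label[int(p)] for p in predictions]
print(pred_labels[:5])  # e.g. ['经验贴', '求助贴', '招聘信息', '经验贴', '求助贴']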

Putting it all together

Combining the code above into a single script gives:

import numpy as np
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from transformers import TrainingArguments, Trainer
from transformers import EvalPrediction
from sklearn.metrics import f1_score, accuracy_score, classification_report

model_name = "uer/chinese_roberta_L-4_H-512"
tokenizer = AutoTokenizer.from_pretrained(model_name)

max_input_length = 128
label2id = {
    '招聘信息':0,
    '经验贴':1,
    '求助贴':2
}
id2label = {v:k for k,v in label2id.items()}

def preprocess_function(examples):
    model_inputs = tokenizer(examples["text"], max_length=max_input_length, truncation=True)
    labels = [label2id[x] for x in examples['target']]
    model_inputs["labels"] = labels
    return model_inputs

raw_datasets = load_dataset('csv', data_files={'train': 'train.csv', 'test': 'test.csv'})
tokenized_datasets = raw_datasets.map(preprocess_function, batched=True, remove_columns=raw_datasets['train'].column_names)


def multi_label_metrics(predictions, labels):
    # Despite the name, this is standard multi-class evaluation: take the
    # argmax over the per-class logits as the predicted class id.
    preds = np.argmax(predictions, -1)
    y_true = labels
    f1_micro_average = f1_score(y_true=y_true, y_pred=preds, average='micro')
    accuracy = accuracy_score(y_true, preds)
    # Print a per-class report with readable label names.
    print(classification_report([id2label[x] for x in y_true], [id2label[x] for x in preds]))
    return {'f1': f1_micro_average, 'accuracy': accuracy}
 
def compute_metrics(p: EvalPrediction):
    preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
    result = multi_label_metrics(predictions=preds, labels=p.label_ids)
    return result


model = AutoModelForSequenceClassification.from_pretrained(model_name,
                                        # problem_type="multi_label_classification",  # not needed: single-label, multi-class
                                        num_labels=3,
                                        # id2label=id2label,
                                        # label2id=label2id
                                        )

batch_size = 64
metric_name = "f1"

training_args = TrainingArguments(
    "/root/autodl-tmp/run",
    evaluation_strategy="epoch",
    save_strategy="epoch",
    learning_rate=2e-4,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    # gradient_accumulation_steps=2,
    num_train_epochs=10,
    save_total_limit=1,
    weight_decay=0.01,
    load_best_model_at_end=True,
    metric_for_best_model=metric_name,
    fp16=True,
)

trainer = Trainer(
    model,
    training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics
)

trainer.train()

print("test")
print(trainer.evaluate())
trainer.save_model("bert") # 模型保存到当前文件夹的名为bert文件夹下。

predictions, labels, _ = trainer.predict(tokenized_datasets["test"])
predictions = np.argmax(predictions, axis=-1)

print(predictions)
print(labels)

Inference with the trained model

Now use the trained model to run inference on another dataset: nearly 40,000 posts crawled from 牛客网. Data link: historical_data

Each row of the data has a title column and a content column.

from transformers import AutoTokenizer, AutoModelForSequenceClassification
import pandas as pd
import torch

# Load the crawled posts and merge title and content into a single text field.
data = pd.read_excel("historical_data.xlsx", sheet_name=0).fillna(" ")
data['text'] = data['title'].apply(lambda x: str(x) if x else "") + data['content'].apply(lambda x: str(x) if x else "")

model_name = "bert"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

if torch.cuda.is_available():
    device = "cuda:0"
    model.half()  # fp16 inference on GPU for speed and lower memory
else:
    device = "cpu"
model = model.to(device)

max_input_length = 128
label2id = {
    '招聘信息':0,
    '经验贴':1,
    '求助贴':2
}
id2label = {v:k for k,v in label2id.items()}

def get_answer(text):
    # Batch-tokenize the texts and return the predicted class ids.
    text = [x for x in text]
    inputs = tokenizer(text, return_tensors="pt", max_length=max_input_length, padding=True, truncation=True)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs).logits.argmax(-1).tolist()
    return outputs

# print(get_answer(data['text'][:10]))

# Predict in batches of 32 over the whole dataset.
pred = []
index, batch_size = 0, 32

while index < len(data['text']):
    pred.extend(get_answer(data['text'][index:index + batch_size]))
    index += batch_size

pred = [id2label[x] for x in pred]  # map class ids back to label names
data["target"] = pred

# Write the results to Excel. Recent pandas versions removed to_excel's
# `encoding` argument and ExcelWriter.save(), so write directly with to_excel.
data.to_excel("generate.xlsx", index=False, sheet_name='Sheet1')

This produces a generate.xlsx file.
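To verify the export, the file can be read back with pandas (this assumes openpyxl is installed, which pandas uses for .xlsx files):

check = pd.read_excel("generate.xlsx", sheet_name="Sheet1")
print(check[["text", "target"]].head())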

In generate.xlsx, title and content have been merged into text, and the model's prediction, one of 求助贴, 招聘信息, or 经验贴, is written to the target column.
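As an alternative to the hand-rolled get_answer loop, the saved model also works with the transformers pipeline API. A minimal sketch (note: since id2label/label2id were left commented out during training, the pipeline reports generic LABEL_0/LABEL_1/LABEL_2 names unless you pass the mappings when loading the model):

from transformers import pipeline

classifier = pipeline("text-classification", model="bert", tokenizer="bert")
print(classifier("秋招求内推,帮忙看看我的简历"))
# e.g. [{'label': 'LABEL_2', 'score': 0.97}]  (label name and score are illustrative)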