Using the pretrained BERT model to work through the entire competition
lf0517
Posted on 2023-1-14 22:04:15
Import the required libraries
import numpy as np
import pandas as pd
from math import ceil, floor
import tensorflow as tf
import tensorflow.keras.layers as L
from tensorflow.keras.initializers import TruncatedNormal
from sklearn import model_selection
from transformers import BertConfig, TFBertPreTrainedModel, TFBertMainLayer
from tokenizers import BertWordPieceTokenizer
Read and understand the data
In a competition, understanding the data is critical, so the first step is to read the data and inspect its contents and characteristics.
First, read the CSV files with pandas:
train_df = pd.read_csv('train.csv')
train_df.dropna(inplace=True)
test_df = pd.read_csv('test.csv')
test_df.loc[:, "selected_text"] = test_df.text.values
submission_df = pd.read_csv('sample_submission.csv')
Next, check how much data we have: there are 27,485 training samples and 3,535 test samples.
print("train numbers =", train_df.shape)$ [- e" R; b1 e2 [8 Y
print("test numbers =", test_df.shape)
Then look at the first 10 rows of the training and test data. Each record contains the following fields:
textID: the unique ID of the text record;
text: the original sentence;
selected_text: the phrase that expresses the sentiment;
sentiment: the sentiment type: neutral, positive, or negative.

From this we can see that the task is: given the labeled sentiment, select the part of the original sentence that expresses that sentiment. For example, for a tweet labeled negative, the selected_text might be just the few words that carry the negative tone.
train_df.head(10)
test_df.head(10)
Define constants
# Directory holding the pretrained BERT weights and data
PATH = "./bert-base-uncased/"
# Maximum sequence length
MAX_SEQUENCE_LENGTH = 128
Load the vocabulary
BERT is trained against a fixed vocabulary, so we first load it with BertWordPieceTokenizer. Here lowercase=True means every token is lowercased; making the model case-insensitive reduces its resource usage.
TOKENIZER = BertWordPieceTokenizer(f"{PATH}/vocab.txt", lowercase=True)
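To see what the tokenizer produces, here is a quick sketch with a made-up sentence (the exact ids depend on the vocab.txt file); it prints the token ids and the character offsets that the preprocessing function below relies on:
# Quick look at the tokenizer output on a made-up sentence.
sample_enc = TOKENIZER.encode("I love this movie")
print(sample_enc.tokens)   # WordPiece tokens (may include [CLS]/[SEP] by default)
print(sample_enc.ids)      # ids of those tokens in vocab.txt
print(sample_enc.offsets)  # (start, end) character positions of each token in the text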
Define the data loader
Define the data preprocessing function
def preprocess(tweet, selected_text, sentiment):
    # Convert the raw strings, which arrive as byte strings, back into UTF-8 strings
    tweet = tweet.decode('utf-8')
    selected_text = selected_text.decode('utf-8')
    sentiment = sentiment.decode('utf-8')
    tweet = " ".join(str(tweet).split())
    selected_text = " ".join(str(selected_text).split())

    # Mark, character by character, the span of the tweet covered by selected_text
    idx_start, idx_end = None, None
    for index in (i for i, c in enumerate(tweet) if c == selected_text[0]):
        if tweet[index:index+len(selected_text)] == selected_text:
            idx_start = index
            idx_end = index + len(selected_text)
            break
    intersection = [0] * len(tweet)
    if idx_start is not None and idx_end is not None:
        for char_idx in range(idx_start, idx_end):
            intersection[char_idx] = 1

    # Encode the text with the tokenizer. This returns the token ids (indices into
    # the vocabulary) and the start/end character positions of each token in the text.
    enc = TOKENIZER.encode(tweet)
    input_ids_orig, offsets = enc.ids, enc.offsets
    target_idx = []
    for i, (o1, o2) in enumerate(offsets):
        if sum(intersection[o1: o2]) > 0:
            target_idx.append(i)
    target_start = target_idx[0]
    target_end = target_idx[-1]
    sentiment_map = {
        'positive': 3893,
        'negative': 4997,
        'neutral': 8699,
    }
    # Build the new model input: [CLS] + sentiment token + [SEP] + tweet tokens + [SEP]
    input_ids = [101] + [sentiment_map[sentiment]] + [102] + input_ids_orig + [102]
    input_type_ids = [0] * (len(input_ids_orig) + 4)
    attention_mask = [1] * (len(input_ids_orig) + 4)
    offsets = [(0, 0), (0, 0), (0, 0)] + offsets + [(0, 0)]
    target_start += 3
    target_end += 3
    # Compute the padding length. BERT expects fixed-length inputs,
    # so shorter sequences are padded up to MAX_SEQUENCE_LENGTH.
    padding_length = MAX_SEQUENCE_LENGTH - len(input_ids)
    if padding_length > 0:
        input_ids = input_ids + ([0] * padding_length)
        attention_mask = attention_mask + ([0] * padding_length)
        input_type_ids = input_type_ids + ([0] * padding_length)
        offsets = offsets + ([(0, 0)] * padding_length)
    elif padding_length < 0:
        # Truncate sequences that exceed the maximum length
        # (assumed handling; tweets rarely get this long)
        input_ids = input_ids[:MAX_SEQUENCE_LENGTH]
        attention_mask = attention_mask[:MAX_SEQUENCE_LENGTH]
        input_type_ids = input_type_ids[:MAX_SEQUENCE_LENGTH]
        offsets = offsets[:MAX_SEQUENCE_LENGTH]

    return (
        input_ids, attention_mask, input_type_ids, offsets,
        target_start, target_end, tweet, selected_text, sentiment,
    )
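The hard-coded ids above (101 for [CLS], 102 for [SEP], and 3893/4997/8699 for the words positive, negative and neutral) come from the bert-base-uncased vocab.txt. As a sketch (assuming that same vocabulary file), they could also be looked up from the loaded tokenizer instead of being hard-coded:
# Look up the special-token and sentiment-word ids from the vocabulary
# instead of hard-coding them (assumes the standard bert-base-uncased vocab.txt).
cls_id = TOKENIZER.token_to_id("[CLS]")   # expected: 101
sep_id = TOKENIZER.token_to_id("[SEP]")   # expected: 102
sentiment_map = {s: TOKENIZER.token_to_id(s)
                 for s in ("positive", "negative", "neutral")}
print(cls_id, sep_id, sentiment_map)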
Define the data loader
class TweetDataset(tf.data.Dataset):
    # Output signature: input_ids, attention_mask, input_type_ids, offsets,
    # target_start, target_end, tweet, selected_text, sentiment
    outputTypes = (
        tf.dtypes.int32,  tf.dtypes.int32,   tf.dtypes.int32,
        tf.dtypes.int32,  tf.dtypes.float32, tf.dtypes.float32,
        tf.dtypes.string, tf.dtypes.string,  tf.dtypes.string,
    )
    outputShapes = (
        (128,),   (128,), (128,),
        (128, 2), (),     (),
        (),       (),     (),
    )

    def _generator(tweet, selected_text, sentiment):
        for tw, st, se in zip(tweet, selected_text, sentiment):
            yield preprocess(tw, st, se)

    def __new__(cls, tweet, selected_text, sentiment):
        return tf.data.Dataset.from_generator(
            cls._generator,
            output_types=cls.outputTypes,
            output_shapes=cls.outputShapes,
            args=(tweet, selected_text, sentiment)
        )

    @staticmethod
    def create(dataframe, batch_size, shuffle_buffer_size=-1):
        dataset = TweetDataset(
            dataframe.text.values,
            dataframe.selected_text.values,
            dataframe.sentiment.values
        )
        dataset = dataset.cache()
        if shuffle_buffer_size != -1:
            dataset = dataset.shuffle(shuffle_buffer_size)
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
        return dataset
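A minimal usage sketch (assuming train_df has already been loaded as above): build a small dataset and check that one batch matches outputShapes:
# Build a tiny dataset from the first 64 rows and inspect one batch.
sample_ds = TweetDataset.create(train_df.head(64), batch_size=8)
for batch in sample_ds.take(1):
    print(batch[0].shape)   # input_ids:    (8, 128)
    print(batch[3].shape)   # offsets:      (8, 128, 2)
    print(batch[4].shape)   # target_start: (8,)
    print(batch[6][:2])     # the first two original tweets (as byte strings)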
Define the model
We use BERT for this competition, so here is a brief introduction to the model.
BERT stands for Bidirectional Encoder Representations from Transformers, i.e. the encoder of a bidirectional Transformer; the decoder is not used because it cannot access the information it is supposed to predict.
The main innovations are in the pre-training method: Masked LM and Next Sentence Prediction are used to capture word-level and sentence-level representations respectively.
BERT's main characteristics are:

It uses the Transformer as its main architecture, which captures bidirectional relationships within a sentence more thoroughly;
It is trained with the multi-task objectives Masked Language Model and Next Sentence Prediction;
It was trained on much larger data with much more powerful machines. Google has open-sourced the BERT model, so we can use it directly as a Word2Vec-style embedding matrix and apply it efficiently to our own tasks.

In essence, BERT runs self-supervised learning over a massive corpus so that each word ends up with a good feature representation.
For a downstream task, BERT's representations can then be used directly as word-embedding features: BERT is a model other tasks can transfer-learn from, either fine-tuned for the task or frozen and used as a feature extractor.
In this competition we define a BertModel class that builds on TFBertPreTrainedModel for inference.
BERT's outputs are kept in hidden_states, which are then fed into a Dense layer; its outputs are the start and end positions of the span of text that expresses the sentiment.
These two positions tell us which tokens to extract from the original text.
class BertModel(TFBertPreTrainedModel):
    # dropout rate, to reduce overfitting
    dr = 0.1
    # number of hidden states to concatenate
    hs = 2

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.bert = TFBertMainLayer(config, name="bert")
        self.concat = L.Concatenate()
        self.dropout = L.Dropout(self.dr)
        self.qa_outputs = L.Dense(
            config.num_labels,
            kernel_initializer=TruncatedNormal(stddev=config.initializer_range),
            dtype='float32',
            name="qa_outputs")

    @tf.function
    def call(self, inputs, **kwargs):
        _, _, hidden_states = self.bert(inputs, **kwargs)

        # Concatenate the last `hs` hidden states along the feature axis
        hidden_states = self.concat([
            hidden_states[-i] for i in range(1, self.hs+1)
        ])

        hidden_states = self.dropout(hidden_states, training=kwargs.get("training", False))
        logits = self.qa_outputs(hidden_states)
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)
        return start_logits, end_logits
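As a rough shape check (a sketch that assumes the pretrained weights are available under PATH, mirroring the setup in the training section below), feeding a dummy batch of two padding-only sequences should produce start and end logits of shape (2, 128) each:
# Hypothetical shape check on a dummy batch.
config = BertConfig(output_hidden_states=True, num_labels=2)
model = BertModel.from_pretrained(PATH, config=config)
dummy_ids = tf.zeros((2, MAX_SEQUENCE_LENGTH), dtype=tf.int32)
dummy_mask = tf.zeros((2, MAX_SEQUENCE_LENGTH), dtype=tf.int32)
dummy_types = tf.zeros((2, MAX_SEQUENCE_LENGTH), dtype=tf.int32)
start_logits, end_logits = model((dummy_ids, dummy_mask, dummy_types))
print(start_logits.shape, end_logits.shape)   # (2, 128) (2, 128)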
Define the training function
def train(model, dataset, loss_fn, optimizer):

    @tf.function
    def train_step(model, inputs, y_true, loss_fn, optimizer):
        with tf.GradientTape() as tape:
            y_pred = model(inputs, training=True)
            # Sum the losses of the start- and end-position heads
            loss = loss_fn(y_true[0], y_pred[0])
            loss += loss_fn(y_true[1], y_pred[1])
            scaled_loss = optimizer.get_scaled_loss(loss)

        scaled_gradients = tape.gradient(scaled_loss, model.trainable_variables)
        gradients = optimizer.get_unscaled_gradients(scaled_gradients)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        return loss, y_pred

    epoch_loss = 0.
    for batch_num, sample in enumerate(dataset):
        loss, y_pred = train_step(model, sample[:3], sample[4:6], loss_fn, optimizer)
        epoch_loss += loss
        print(
            f"training ... batch {batch_num+1:03d} : "
            f"train loss {epoch_loss/(batch_num+1):.3f} ",
            end='\r')
Define the prediction function
def predict(model, dataset, loss_fn, optimizer):

    @tf.function
    def predict_step(model, inputs):
        return model(inputs)

    def to_numpy(*args):
        out = []
        for arg in args:
            if arg.dtype == tf.string:
                arg = [s.decode('utf-8') for s in arg.numpy()]
                out.append(arg)
            else:
                arg = arg.numpy()
                out.append(arg)
        return out

    offset = tf.zeros([0, 128, 2], dtype=tf.dtypes.int32)
    text = tf.zeros([0,], dtype=tf.dtypes.string)
    selected_text = tf.zeros([0,], dtype=tf.dtypes.string)
    sentiment = tf.zeros([0,], dtype=tf.dtypes.string)
    pred_start = tf.zeros([0, 128], dtype=tf.dtypes.float32)
    pred_end = tf.zeros([0, 128], dtype=tf.dtypes.float32)

    for batch_num, sample in enumerate(dataset):

        print(f"predicting ... batch {batch_num+1:03d}"+" "*20, end='\r')
        y_pred = predict_step(model, sample[:3])
        # add batch to accumulators
        pred_start = tf.concat((pred_start, y_pred[0]), axis=0)
        pred_end = tf.concat((pred_end, y_pred[1]), axis=0)
        offset = tf.concat((offset, sample[3]), axis=0)
        text = tf.concat((text, sample[6]), axis=0)
        selected_text = tf.concat((selected_text, sample[7]), axis=0)
        sentiment = tf.concat((sentiment, sample[8]), axis=0)
    pred_start, pred_end, text, selected_text, sentiment, offset = \
        to_numpy(pred_start, pred_end, text, selected_text, sentiment, offset)

    return pred_start, pred_end, text, selected_text, sentiment, offset
Scoring function
This competition is scored with the word-level Jaccard score, computed as jaccard(A, B) = |A ∩ B| / |A ∪ B|.
In other words, the score is the number of words shared by the predicted text and the ground-truth text, divided by the number of distinct words across both:
def jaccard(str1, str2):
    a = set(str1.lower().split())
    b = set(str2.lower().split())
    c = a.intersection(b)
    return float(len(c)) / (len(a) + len(b) - len(c))
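For example, comparing the prediction "so much fun" with the ground truth "fun": the two word sets share one word out of three distinct words in total, so the score is 1/3:
print(jaccard("so much fun", "fun"))   # 0.333...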
Define the prediction decoding function
The decoding function takes the start and end indices predicted by the model, matches them against the character offsets of the tokens in the sample sentence, and extracts every word in that span as our prediction.
def decode_prediction(pred_start, pred_end, text, offset, sentiment):

    def decode(pred_start, pred_end, text, offset):
        decoded_text = ""
        for i in range(pred_start, pred_end+1):
            decoded_text += text[offset[i][0]:offset[i][1]]
            # re-insert a space when the next token does not start right after this one
            if (i+1) < len(offset) and offset[i][1] < offset[i+1][0]:
                decoded_text += " "
        return decoded_text

    decoded_predictions = []
    for i in range(len(text)):
        # Heuristic (assumed here): for neutral or very short tweets, keep the full text
        if sentiment[i] == "neutral" or len(text[i].split()) < 2:
            decoded_text = text[i]
        else:
            idx_start = np.argmax(pred_start[i])
            idx_end = np.argmax(pred_end[i])
            if idx_start > idx_end:
                idx_end = idx_start
            decoded_text = str(decode(idx_start, idx_end, text[i], offset[i]))
            if len(decoded_text) == 0:
                decoded_text = text[i]
        decoded_predictions.append(decoded_text)

    return decoded_predictions
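A tiny illustration with made-up logits (two tokens covering the text "great movie", with hypothetical offsets): the argmax of the start logits picks token 0, the argmax of the end logits picks token 1, so the decoded span is the whole phrase:
# Hypothetical two-token example; logits and offsets are made up for illustration.
text_ex = ["great movie"]
offset_ex = [[(0, 5), (6, 11)]]           # character offsets of the two tokens
pred_start_ex = np.array([[0.9, 0.1]])    # start logits favour token 0
pred_end_ex = np.array([[0.2, 0.8]])      # end logits favour token 1
print(decode_prediction(pred_start_ex, pred_end_ex,
                        text_ex, offset_ex, ["positive"]))   # ['great movie']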
Start training
Split the training data into 5 folds and train for 5 epochs on each fold, using the Adam optimizer with a learning rate of 3e-5 and a batch size of 32.
num_folds = 5
num_epochs = 5
batch_size = 32
learning_rate = 3e-5
optimizer = tf.keras.optimizers.Adam(learning_rate)
optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
    optimizer, 'dynamic')
config = BertConfig(output_hidden_states=True, num_labels=2)
model = BertModel.from_pretrained(PATH, config=config)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
kfold = model_selection.KFold(
    n_splits=num_folds, shuffle=True, random_state=42)
test_preds_start = np.zeros((len(test_df), 128), dtype=np.float32)
test_preds_end = np.zeros((len(test_df), 128), dtype=np.float32)
for fold_num, (train_idx, valid_idx) in enumerate(kfold.split(train_df.text)):
    print("\nfold %02d" % (fold_num+1))
    # Create the train, valid and test datasets
    train_dataset = TweetDataset.create(
        train_df.iloc[train_idx], batch_size, shuffle_buffer_size=2048)
    valid_dataset = TweetDataset.create(
        train_df.iloc[valid_idx], batch_size, shuffle_buffer_size=-1)
    test_dataset = TweetDataset.create(
        test_df, batch_size, shuffle_buffer_size=-1)

    best_score = float('-inf')
    for epoch_num in range(num_epochs):
        print("\nepoch %03d" % (epoch_num+1))
        train(model, train_dataset, loss_fn, optimizer)
        pred_start, pred_end, text, selected_text, sentiment, offset = \
            predict(model, valid_dataset, loss_fn, optimizer)
        selected_text_pred = decode_prediction(
            pred_start, pred_end, text, offset, sentiment)
        jaccards = []
        for i in range(len(selected_text)):
            jaccards.append(
                jaccard(selected_text[i], selected_text_pred[i]))
        score = np.mean(jaccards)
        print(f"valid jaccard epoch {epoch_num+1:03d}: {score}"+" "*15)

        if score > best_score:
            best_score = score

            # predict test set
            test_pred_start, test_pred_end, test_text, _, test_sentiment, test_offset = \
                predict(model, test_dataset, loss_fn, optimizer)

    # Accumulate this fold's best-epoch test predictions, weighting each fold by 1/num_folds
    test_preds_start += test_pred_start * 0.2
    test_preds_end += test_pred_end * 0.2

    # Re-create the model between folds to avoid running out of memory (OOM)
    session = tf.compat.v1.get_default_session()
    graph = tf.compat.v1.get_default_graph()
    del session, graph, model
    model = BertModel.from_pretrained(PATH, config=config)
Predict on the test data and generate the submission file
selected_text_pred = decode_prediction(
    test_preds_start, test_preds_end, test_text, test_offset, test_sentiment)
# Post-process: lower-case and de-duplicate the predicted words
def f(selected):
    return " ".join(set(selected.lower().split()))
submission_df.loc[:, 'selected_text'] = selected_text_pred
submission_df['selected_text'] = submission_df['selected_text'].map(f)
submission_df.to_csv("submission.csv", index=False)
At the time of submission this solution ranked 153rd out of 553 teams, with a score of 0.68.