Using the pretrained BERT model to complete the data analysis for the whole competition
Posted by lf0517 on 2023-01-14 22:04:15
Import the required libraries
import numpy as np
import pandas as pd
from math import ceil, floor
import tensorflow as tf
import tensorflow.keras.layers as L
from tensorflow.keras.initializers import TruncatedNormal
from sklearn import model_selection
from transformers import BertConfig, TFBertPreTrainedModel, TFBertMainLayer
from tokenizers import BertWordPieceTokenizer
Read and explore the data
In a competition, understanding the data is crucial, so the first thing to do is read the data and look at its content and characteristics.
First read the csv files with pandas:
train_df = pd.read_csv('train.csv')
train_df.dropna(inplace=True)
test_df = pd.read_csv('test.csv')
test_df.loc[:, "selected_text"] = test_df.text.values
submission_df = pd.read_csv('sample_submission.csv')
Next, check the size of the data: there are 27,485 training examples and 3,535 test examples.
print("train numbers =", train_df.shape)
print("test numbers =", test_df.shape)
Then look at the fields and the first 10 rows of the training and test data. Each record contains the following fields:
textID: the unique ID of the record;
text: the original sentence (the tweet);
selected_text: the part of the text that expresses the sentiment;
sentiment: the sentiment label: neutral, positive or negative.
From the data we can see that the goal is, given the sentiment label, to select the part of the original sentence that expresses that sentiment.
train_df.head(10)
test_df.head(10)
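To make the task concrete, here is a made-up example in the same shape as the real records (not an actual row from the dataset): for text = "spent the whole day waiting and the package never showed up" with sentiment = negative, the answer would be a span such as selected_text = "never showed up". The model has to recover that span given only text and sentiment.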
Define constants
# directory that holds the pretrained BERT weights and data
PATH = "./bert-base-uncased/"
# maximum sequence length
MAX_SEQUENCE_LENGTH = 128
Load the vocabulary
BERT is trained against a fixed vocabulary, so in the competition we first load it with BertWordPieceTokenizer; lowercase=True means every token is lower-cased. Making the model case-insensitive reduces its resource usage.
TOKENIZER = BertWordPieceTokenizer(f"{PATH}/vocab.txt", lowercase=True)
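To see what the tokenizer produces, you can encode a short sentence and inspect the result (a quick sanity check; the exact ids depend on the vocab.txt shipped with bert-base-uncased):

enc = TOKENIZER.encode("I love this movie")
print(enc.tokens)   # the WordPiece tokens, lower-cased because lowercase=True
print(enc.ids)      # the vocabulary id of each token
print(enc.offsets)  # the (start, end) character span of each token in the input string

The ids and offsets fields are exactly what the preprocessing function below relies on.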
Define the data loader
Define the data preprocessing function
def preprocess(tweet, selected_text, sentiment):
    # the generator passes byte strings; decode them back to utf-8 strings
    tweet = tweet.decode('utf-8')
    selected_text = selected_text.decode('utf-8')
    sentiment = sentiment.decode('utf-8')

    tweet = " ".join(str(tweet).split())
    selected_text = " ".join(str(selected_text).split())

    # mark the characters that text and selected_text have in common
    idx_start, idx_end = None, None
    for index in (i for i, c in enumerate(tweet) if c == selected_text[0]):
        if tweet[index:index + len(selected_text)] == selected_text:
            idx_start = index
            idx_end = index + len(selected_text)
            break

    intersection = [0] * len(tweet)
    if idx_start is not None and idx_end is not None:
        for char_idx in range(idx_start, idx_end):
            intersection[char_idx] = 1

    # encode the tweet with the tokenizer; this returns the vocabulary ids of the
    # tokens and the (start, end) character position of each token in the tweet
    enc = TOKENIZER.encode(tweet)
    input_ids_orig, offsets = enc.ids, enc.offsets

    # tokens that overlap the selected_text span become the target
    target_idx = []
    for i, (o1, o2) in enumerate(offsets):
        if sum(intersection[o1:o2]) > 0:
            target_idx.append(i)
    target_start = target_idx[0]
    target_end = target_idx[-1]

    sentiment_map = {
        'positive': 3893,
        'negative': 4997,
        'neutral': 8699,
    }

    # combine the sentiment token and the tweet tokens into the new model input:
    # [CLS] sentiment [SEP] tweet [SEP]
    input_ids = [101] + [sentiment_map[sentiment]] + [102] + input_ids_orig + [102]
    input_type_ids = [0] * (len(input_ids_orig) + 4)
    attention_mask = [1] * (len(input_ids_orig) + 4)
    offsets = [(0, 0), (0, 0), (0, 0)] + offsets + [(0, 0)]
    target_start += 3
    target_end += 3

    # BERT expects fixed-length inputs, so pad short sequences with zeros
    # (and truncate sequences that are too long) to MAX_SEQUENCE_LENGTH
    padding_length = MAX_SEQUENCE_LENGTH - len(input_ids)
    if padding_length > 0:
        input_ids = input_ids + ([0] * padding_length)
        attention_mask = attention_mask + ([0] * padding_length)
        input_type_ids = input_type_ids + ([0] * padding_length)
        offsets = offsets + ([(0, 0)] * padding_length)
    elif padding_length < 0:
        input_ids = input_ids[:MAX_SEQUENCE_LENGTH]
        attention_mask = attention_mask[:MAX_SEQUENCE_LENGTH]
        input_type_ids = input_type_ids[:MAX_SEQUENCE_LENGTH]
        offsets = offsets[:MAX_SEQUENCE_LENGTH]

    return (
        input_ids, attention_mask, input_type_ids, offsets,
        target_start, target_end, tweet, selected_text, sentiment,
    )
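The hard-coded ids come from the bert-base-uncased vocabulary: 101 and 102 are the [CLS] and [SEP] special tokens, and 3893, 4997 and 8699 are the WordPiece ids used here for the words "positive", "negative" and "neutral". If you want to verify them against your local vocab.txt, a small optional check (assuming the tokenizer defined above):

print(TOKENIZER.token_to_id("[CLS]"), TOKENIZER.token_to_id("[SEP]"))  # expected: 101 102
for word in ("positive", "negative", "neutral"):
    print(word, TOKENIZER.token_to_id(word))  # expected: 3893, 4997, 8699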
Define the data loader
class TweetDataset(tf.data.Dataset):
    outputTypes = (
        tf.dtypes.int32, tf.dtypes.int32, tf.dtypes.int32,
        tf.dtypes.int32, tf.dtypes.float32, tf.dtypes.float32,
        tf.dtypes.string, tf.dtypes.string, tf.dtypes.string,
    )

    outputShapes = (
        (128,), (128,), (128,),
        (128, 2), (), (),
        (), (), (),
    )

    def _generator(tweet, selected_text, sentiment):
        for tw, st, se in zip(tweet, selected_text, sentiment):
            yield preprocess(tw, st, se)

    def __new__(cls, tweet, selected_text, sentiment):
        return tf.data.Dataset.from_generator(
            cls._generator,
            output_types=cls.outputTypes,
            output_shapes=cls.outputShapes,
            args=(tweet, selected_text, sentiment)
        )

    @staticmethod
    def create(dataframe, batch_size, shuffle_buffer_size=-1):
        dataset = TweetDataset(
            dataframe.text.values,
            dataframe.selected_text.values,
            dataframe.sentiment.values
        )
        dataset = dataset.cache()
        if shuffle_buffer_size != -1:
            dataset = dataset.shuffle(shuffle_buffer_size)
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
        return dataset
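To check that the pipeline delivers what the model will expect, you can build a small dataset and look at the shapes of one batch (an optional inspection sketch, assuming train_df has been loaded as above):

sample_dataset = TweetDataset.create(train_df.head(64), batch_size=8)
batch = next(iter(sample_dataset))
print(batch[0].shape)  # input_ids:     (8, 128)
print(batch[3].shape)  # token offsets: (8, 128, 2)
print(batch[4].shape)  # target_start:  (8,)
print(batch[6][:2])    # the first two raw tweets as tf.string tensors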
Define the model
We use BERT for this competition; here is a brief introduction to the model.
BERT stands for Bidirectional Encoder Representations from Transformers, i.e. the encoder of a bidirectional Transformer (the decoder is not used, because it cannot see the information it is supposed to predict).
The main innovation lies in the pre-training objectives: Masked LM and Next Sentence Prediction, which capture word-level and sentence-level representations respectively.
BERT's main characteristics are:
It uses the Transformer as the backbone of the algorithm, which captures the bidirectional relations within a sentence more thoroughly;
It is pre-trained with the multi-task objectives Masked Language Model and Next Sentence Prediction;
It is trained on much larger data with much more powerful machines. Google has open-sourced the BERT model, so we can use BERT directly as a Word2Vec-style embedding and apply it efficiently to our own tasks.

In essence, BERT runs self-supervised learning over a huge corpus so that every word ends up with a good feature representation.
For a downstream task we can then use BERT's representations directly as word embeddings. BERT is therefore a model intended for transfer learning: it can be fine-tuned for the task, or frozen and used as a feature extractor.
In the competition we define a BertModel class built on TFBertPreTrainedModel to run the model.
The BERT outputs are kept in hidden_states; these hidden states are then fed into a Dense layer that outputs the start and end positions of the span of text expressing the sentiment.
Those two positions tell us which tokens to extract from the original sentence.
class BertModel(TFBertPreTrainedModel):
    # dropout rate, to reduce overfitting
    dr = 0.1
    # number of hidden states to concatenate
    hs = 2

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.bert = TFBertMainLayer(config, name="bert")
        self.concat = L.Concatenate()
        self.dropout = L.Dropout(self.dr)
        self.qa_outputs = L.Dense(
            config.num_labels,
            kernel_initializer=TruncatedNormal(stddev=config.initializer_range),
            dtype='float32',
            name="qa_outputs")

    @tf.function
    def call(self, inputs, **kwargs):
        _, _, hidden_states = self.bert(inputs, **kwargs)

        hidden_states = self.concat([
            hidden_states[-i] for i in range(1, self.hs + 1)
        ])

        hidden_states = self.dropout(hidden_states, training=kwargs.get("training", False))
        logits = self.qa_outputs(hidden_states)
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)

        return start_logits, end_logits
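To make the tensor shapes explicit: with output_hidden_states=True the BERT layer returns the hidden states of the embedding layer plus all 12 encoder layers, each of shape (batch, 128, 768) for bert-base. Concatenating the last hs=2 of them gives (batch, 128, 1536); the Dense layer with num_labels=2 maps that to (batch, 128, 2); splitting and squeezing yields two (batch, 128) logit tensors, one distribution over start positions and one over end positions. A rough check (a sketch that assumes the model created in the training section below and the batch from the dataset sketch above):

start_logits, end_logits = model(batch[:3], training=False)
print(start_logits.shape, end_logits.shape)  # (8, 128) (8, 128)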
Define the training function
def train(model, dataset, loss_fn, optimizer):

    @tf.function
    def train_step(model, inputs, y_true, loss_fn, optimizer):
        with tf.GradientTape() as tape:
            y_pred = model(inputs, training=True)
            loss = loss_fn(y_true[0], y_pred[0])
            loss += loss_fn(y_true[1], y_pred[1])
            scaled_loss = optimizer.get_scaled_loss(loss)

        scaled_gradients = tape.gradient(scaled_loss, model.trainable_variables)
        gradients = optimizer.get_unscaled_gradients(scaled_gradients)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        return loss, y_pred

    epoch_loss = 0.
    for batch_num, sample in enumerate(dataset):
        # sample[:3] = (input_ids, attention_mask, input_type_ids),
        # sample[4:6] = (target_start, target_end), as returned by preprocess
        loss, y_pred = train_step(model, sample[:3], sample[4:6], loss_fn, optimizer)
        epoch_loss += loss
        print(
            f"training ... batch {batch_num+1:03d} : "
            f"train loss {epoch_loss/(batch_num+1):.3f} ",
            end='\r')
Define the prediction function
def predict(model, dataset, loss_fn, optimizer):

    @tf.function
    def predict_step(model, inputs):
        return model(inputs)

    def to_numpy(*args):
        out = []
        for arg in args:
            if arg.dtype == tf.string:
                arg = [s.decode('utf-8') for s in arg.numpy()]
                out.append(arg)
            else:
                arg = arg.numpy()
                out.append(arg)
        return out

    # accumulators for the whole dataset
    offset = tf.zeros([0, 128, 2], dtype=tf.dtypes.int32)
    text = tf.zeros([0,], dtype=tf.dtypes.string)
    selected_text = tf.zeros([0,], dtype=tf.dtypes.string)
    sentiment = tf.zeros([0,], dtype=tf.dtypes.string)
    pred_start = tf.zeros([0, 128], dtype=tf.dtypes.float32)
    pred_end = tf.zeros([0, 128], dtype=tf.dtypes.float32)

    for batch_num, sample in enumerate(dataset):

        print(f"predicting ... batch {batch_num+1:03d}" + " " * 20, end='\r')

        y_pred = predict_step(model, sample[:3])

        # add the batch to the accumulators
        pred_start = tf.concat((pred_start, y_pred[0]), axis=0)
        pred_end = tf.concat((pred_end, y_pred[1]), axis=0)
        offset = tf.concat((offset, sample[3]), axis=0)
        text = tf.concat((text, sample[6]), axis=0)
        selected_text = tf.concat((selected_text, sample[7]), axis=0)
        sentiment = tf.concat((sentiment, sample[8]), axis=0)

    pred_start, pred_end, text, selected_text, sentiment, offset = \
        to_numpy(pred_start, pred_end, text, selected_text, sentiment, offset)

    return pred_start, pred_end, text, selected_text, sentiment, offset
The evaluation metric
The competition is scored with the word-level Jaccard score, computed as:

    jaccard(A, B) = |A ∩ B| / |A ∪ B| = |A ∩ B| / (|A| + |B| - |A ∩ B|)

The Jaccard score counts how many words the prediction and the ground truth share, divided by the total number of distinct words across both.
def jaccard(str1, str2):
    a = set(str1.lower().split())
    b = set(str2.lower().split())
    c = a.intersection(b)
    return float(len(c)) / (len(a) + len(b) - len(c))
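For example, a quick check of the metric:

print(jaccard("I am very happy today", "happy today"))  # 2 shared words, 5 distinct words in the union -> 0.4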
Define the function that decodes the predictions
The decoding function takes the start and end indices predicted by the model, matches them against the token offsets recorded earlier for the sample sentence, and extracts every word inside that span from the original text as our prediction.
def decode_prediction(pred_start, pred_end, text, offset, sentiment):

    def decode(pred_start, pred_end, text, offset):
        decoded_text = ""
        for i in range(pred_start, pred_end + 1):
            decoded_text += text[offset[i][0]:offset[i][1]]
            if (i + 1) < len(offset) and offset[i][1] < offset[i + 1][0]:
                decoded_text += " "  # keep the gap between two non-adjacent tokens
        return decoded_text

    decoded_predictions = []
    for i in range(len(text)):
        # take the most likely start and end positions
        idx_start = np.argmax(pred_start[i])
        idx_end = np.argmax(pred_end[i])
        if idx_start > idx_end:
            idx_end = idx_start
        decoded_text = str(decode(idx_start, idx_end, text[i], offset[i]))
        if len(decoded_text) == 0:
            decoded_text = text[i]
        decoded_predictions.append(decoded_text)
    return decoded_predictions
Start training
Split the training data into 5 folds and train 5 epochs on each fold, using the Adam optimizer with a learning rate of 3e-5 and a batch size of 32.
num_folds = 5
num_epochs = 5
batch_size = 32
learning_rate = 3e-5

optimizer = tf.keras.optimizers.Adam(learning_rate)
optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
    optimizer, 'dynamic')

config = BertConfig(output_hidden_states=True, num_labels=2)
model = BertModel.from_pretrained(PATH, config=config)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

kfold = model_selection.KFold(
    n_splits=num_folds, shuffle=True, random_state=42)

test_preds_start = np.zeros((len(test_df), 128), dtype=np.float32)
test_preds_end = np.zeros((len(test_df), 128), dtype=np.float32)

for fold_num, (train_idx, valid_idx) in enumerate(kfold.split(train_df.text)):
    print("\nfold %02d" % (fold_num+1))

    # build the train, valid and test datasets
    train_dataset = TweetDataset.create(
        train_df.iloc[train_idx], batch_size, shuffle_buffer_size=2048)
    valid_dataset = TweetDataset.create(
        train_df.iloc[valid_idx], batch_size, shuffle_buffer_size=-1)
    test_dataset = TweetDataset.create(
        test_df, batch_size, shuffle_buffer_size=-1)

    best_score = float('-inf')
    for epoch_num in range(num_epochs):
        print("\nepoch %03d" % (epoch_num+1))

        train(model, train_dataset, loss_fn, optimizer)

        pred_start, pred_end, text, selected_text, sentiment, offset = \
            predict(model, valid_dataset, loss_fn, optimizer)

        selected_text_pred = decode_prediction(
            pred_start, pred_end, text, offset, sentiment)
        jaccards = []
        for i in range(len(selected_text)):
            jaccards.append(
                jaccard(selected_text[i], selected_text_pred[i]))
        score = np.mean(jaccards)
        print(f"valid jaccard epoch {epoch_num+1:03d}: {score}" + " " * 15)

        if score > best_score:
            best_score = score

            # predict the test set whenever the validation score improves
            test_pred_start, test_pred_end, test_text, _, test_sentiment, test_offset = \
                predict(model, test_dataset, loss_fn, optimizer)

            # average the test predictions over the folds (0.2 = 1 / num_folds)
            test_preds_start += test_pred_start * 0.2
            test_preds_end += test_pred_end * 0.2

    # rebuild the model between folds to avoid OOM
    session = tf.compat.v1.get_default_session()
    graph = tf.compat.v1.get_default_graph()
    del session, graph, model
    model = BertModel.from_pretrained(PATH, config=config)
Predict the test data and generate the submission file
selected_text_pred = decode_prediction(
    test_preds_start, test_preds_end, test_text, test_offset, test_sentiment)

def f(selected):
    return " ".join(set(selected.lower().split()))

submission_df.loc[:, 'selected_text'] = selected_text_pred
submission_df['selected_text'] = submission_df['selected_text'].map(f)
submission_df.to_csv("submission.csv", index=False)
At the time of submission, this solution ranked 153rd out of 553 teams, with a score of 0.68.