使用预训练的模型BERT来完成对整个竞赛的数据分析
lf0517
发表于 2023-1-14 22:04:15
977
0
0
导入需要的库
import numpy as np
import pandas as pd
from math import ceil, floor
import tensorflow as tf
import tensorflow.keras.layers as L
from tensorflow.keras.initializers import TruncatedNormal
from sklearn import model_selection
from transformers import BertConfig, TFBertPreTrainedModel, TFBertMainLayer
from tokenizers import BertWordPieceTokenizer
读取并解释数据
在竞赛中,对数据的理解是非常关键的。因此我们首先要做的就是读取数据,然后查看数据的内容以及特点。
先用pandas来读取csv数据,
# Load the competition data; drop training rows with missing values.
train_df = pd.read_csv('train.csv')
train_df.dropna(inplace=True)
test_df = pd.read_csv('test.csv')
# The test split has no labels; mirror `text` into `selected_text` so the
# same preprocessing pipeline can be applied to both splits.
test_df.loc[:, "selected_text"] = test_df.text.values
submission_df = pd.read_csv('sample_submission.csv')
再查看下我们的数据的数量,我们一共有27485条训练数据,3535条测试数据,
# Report dataset sizes (rows, columns).
print("train numbers =", train_df.shape)
print("test numbers =", test_df.shape)
紧接着查看训练数据和测试数据前10条表单的字段跟数据,表单中包含了以下几个数据字段:

textID: 文本数据记录的唯一ID;

text: 原始语句;

selected_text: 表示情感的语句;

sentiment: 情感类型, neutral中立, positive积极, negative消极;

从数据中我们可以得出,目标就是根据现有的情感从原本的语句中选出能代表这个情感的语句部分。
# Preview the first 10 rows of each split (notebook-style display).
train_df.head(10)
test_df.head(10)
定义常量
# Directory holding the pretrained BERT weights and vocabulary.
PATH = "./bert-base-uncased/"
# Fixed input sequence length fed to BERT.
MAX_SEQUENCE_LENGTH = 128
载入词向量
BERT是依据一个固定的词向量来进行训练的。因此在竞赛中需要先使用BertWordPieceTokenizer来加载这些词向量,其中的lowercase=True表示所有的词向量都是小写。设置大小写不敏感可以减少模型对资源的占用。
TOKENIZER = BertWordPieceTokenizer(f"{PATH}/vocab.txt", lowercase=True)
定义数据加载器
定义数据预处理函数
def preprocess(tweet, selected_text, sentiment):
    """Encode one (tweet, selected_text, sentiment) example for BERT.

    Returns a 9-tuple matching TweetDataset.outputTypes:
    (input_ids, attention_mask, input_type_ids, offsets,
     target_start, target_end, tweet, selected_text, sentiment).
    """
    # tf.data passes byte strings into the generator; decode back to str.
    tweet = tweet.decode('utf-8')
    selected_text = selected_text.decode('utf-8')
    sentiment = sentiment.decode('utf-8')
    # Normalize whitespace runs to single spaces.
    tweet = " ".join(str(tweet).split())
    selected_text = " ".join(str(selected_text).split())

    # Locate the character span of selected_text inside tweet.
    idx_start, idx_end = None, None
    for index in (i for i, c in enumerate(tweet) if c == selected_text[0]):
        if tweet[index:index + len(selected_text)] == selected_text:
            idx_start = index
            idx_end = index + len(selected_text)
            break

    # Character-level mask: 1 where the selected span overlaps the tweet.
    intersection = [0] * len(tweet)
    if idx_start is not None and idx_end is not None:
        for char_idx in range(idx_start, idx_end):
            intersection[char_idx] = 1

    # Tokenize; enc.offsets maps each token to its (start, end) char span.
    enc = TOKENIZER.encode(tweet)
    input_ids_orig, offsets = enc.ids, enc.offsets
    # Tokens whose char span overlaps the selected text are targets.
    target_idx = []
    for i, (o1, o2) in enumerate(offsets):
        if sum(intersection[o1:o2]) > 0:
            target_idx.append(i)
    target_start = target_idx[0]
    target_end = target_idx[-1]

    # Vocab ids of the literal words "positive"/"negative"/"neutral"
    # in the bert-base-uncased vocabulary.
    sentiment_map = {
        'positive': 3893,
        'negative': 4997,
        'neutral': 8699,
    }
    # Layout: [CLS] sentiment [SEP] tweet-tokens [SEP]
    input_ids = [101] + [sentiment_map[sentiment]] + [102] + input_ids_orig + [102]
    input_type_ids = [0] * (len(input_ids_orig) + 4)
    attention_mask = [1] * (len(input_ids_orig) + 4)
    offsets = [(0, 0), (0, 0), (0, 0)] + offsets + [(0, 0)]
    # Targets shift by the 3 prepended special tokens.
    target_start += 3
    target_end += 3

    # Pad (or truncate) everything to the fixed BERT input length.
    padding_length = MAX_SEQUENCE_LENGTH - len(input_ids)
    if padding_length > 0:
        input_ids = input_ids + ([0] * padding_length)
        attention_mask = attention_mask + ([0] * padding_length)
        input_type_ids = input_type_ids + ([0] * padding_length)
        offsets = offsets + ([(0, 0)] * padding_length)
    elif padding_length < 0:
        # NOTE(review): the source was truncated at this branch; truncating
        # to MAX_SEQUENCE_LENGTH is the standard handling — confirm against
        # the original notebook.
        input_ids = input_ids[:MAX_SEQUENCE_LENGTH]
        attention_mask = attention_mask[:MAX_SEQUENCE_LENGTH]
        input_type_ids = input_type_ids[:MAX_SEQUENCE_LENGTH]
        offsets = offsets[:MAX_SEQUENCE_LENGTH]

    return (
        input_ids, attention_mask, input_type_ids, offsets,
        target_start, target_end, tweet, selected_text, sentiment,
    )
定义数据加载器
class TweetDataset(tf.data.Dataset):
    """tf.data pipeline yielding preprocessed (text, selected_text, sentiment) examples."""

    # Types/shapes must match the 9-tuple returned by `preprocess`.
    outputTypes = (
        tf.dtypes.int32, tf.dtypes.int32, tf.dtypes.int32,
        tf.dtypes.int32, tf.dtypes.float32, tf.dtypes.float32,
        tf.dtypes.string, tf.dtypes.string, tf.dtypes.string,
    )
    outputShapes = (
        (128,), (128,), (128,),
        (128, 2), (), (),
        (), (), (),
    )

    def _generator(tweet, selected_text, sentiment):
        # Yield one preprocessed example per input row.
        for tw, st, se in zip(tweet, selected_text, sentiment):
            yield preprocess(tw, st, se)

    def __new__(cls, tweet, selected_text, sentiment):
        # Build the dataset straight from the generator above.
        return tf.data.Dataset.from_generator(
            cls._generator,
            output_types=cls.outputTypes,
            output_shapes=cls.outputShapes,
            args=(tweet, selected_text, sentiment)
        )

    @staticmethod
    def create(dataframe, batch_size, shuffle_buffer_size=-1):
        """Build a cached, optionally shuffled, batched, prefetched dataset.

        shuffle_buffer_size == -1 disables shuffling (used for valid/test).
        """
        dataset = TweetDataset(
            dataframe.text.values,
            dataframe.selected_text.values,
            dataframe.sentiment.values
        )
        dataset = dataset.cache()
        if shuffle_buffer_size != -1:
            dataset = dataset.shuffle(shuffle_buffer_size)
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
        return dataset
定义模型
我们使用BERT模型来进行这次竞赛,这里对BERT模型做一些简单的介绍。
BERT的全称是Bidirectional Encoder Representation from Transformers,即双向Transformer的Encoder,因为decoder是不能获取要预测的信息的。
模型的主要创新点都在pre-train方法上,即用了Masked LM和Next Sentence Prediction两种方法分别捕捉词语和句子级别的representation。
BERT主要特点如下:

使用了Transformer作为算法的主要框架,Transformer能更彻底地捕捉语句中的双向关系;
使用了Mask Language Model 和 Next Sentence Prediction的多任务训练目标;
使用更强大的机器训练更大规模的数据,Google开源了BERT模型,我们可以直接使用BERT作为Word2Vec的转换矩阵并高效地将其应用到自己的任务中。

BERT的本质是在海量的语料基础上,运行自监督学习方法让单词学习得到一个较好的特征表示。
在之后特定任务中,可以直接使用BERT的特征表示作为该任务的词嵌入特征。所以BERT提供的是一个供其它任务迁移学习的模型,该模型可以根据任务微调或者固定之后作为特征提取器。
在竞赛中,我们定义了一个BertModel类,里面使用TFBertPreTrainedModel来进行推理。
BERT的输出我们保存在hidden_states中,然后将这个得到的hidden_states结果再加入到Dense Layer,最后输出我们需要提取的表示情感的文字的起始位置跟结束位置。
这两个位置信息就是我们需要从原文中提取的词向量的位置。
class BertModel(TFBertPreTrainedModel):
    """BERT span-extraction head: predicts start/end logits over tokens."""

    # Dropout rate to reduce overfitting.
    dr = 0.1
    # Number of final hidden-state layers to concatenate.
    hs = 2

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.bert = TFBertMainLayer(config, name="bert")
        self.concat = L.Concatenate()
        self.dropout = L.Dropout(self.dr)
        # num_labels=2 -> one logit each for start and end positions.
        self.qa_outputs = L.Dense(
            config.num_labels,
            kernel_initializer=TruncatedNormal(stddev=config.initializer_range),
            dtype='float32',
            name="qa_outputs")

    @tf.function
    def call(self, inputs, **kwargs):
        # Requires config.output_hidden_states=True so the third output is
        # the tuple of per-layer hidden states.
        _, _, hidden_states = self.bert(inputs, **kwargs)
        # Concatenate the last `hs` layers along the feature axis.
        hidden_states = self.concat([
            hidden_states[-i] for i in range(1, self.hs + 1)
        ])

        hidden_states = self.dropout(hidden_states, training=kwargs.get("training", False))
        logits = self.qa_outputs(hidden_states)
        # Split the 2 output channels into start/end logit vectors.
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)
        return start_logits, end_logits
定义训练函数
def train(model, dataset, loss_fn, optimizer):
    """Run one epoch of mixed-precision training over `dataset`.

    `optimizer` must be a LossScaleOptimizer (get_scaled_loss /
    get_unscaled_gradients are used).
    """

    @tf.function
    def train_step(model, inputs, y_true, loss_fn, optimizer):
        with tf.GradientTape() as tape:
            y_pred = model(inputs, training=True)
            # Sum of start-position and end-position losses.
            loss = loss_fn(y_true[0], y_pred[0])
            loss += loss_fn(y_true[1], y_pred[1])
            # Scale the loss for mixed-precision numerical stability.
            scaled_loss = optimizer.get_scaled_loss(loss)

        scaled_gradients = tape.gradient(scaled_loss, model.trainable_variables)
        gradients = optimizer.get_unscaled_gradients(scaled_gradients)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        return loss, y_pred

    epoch_loss = 0.
    for batch_num, sample in enumerate(dataset):
        # sample[:3] = (input_ids, attention_mask, input_type_ids),
        # sample[4:6] = (target_start, target_end).
        loss, y_pred = train_step(model, sample[:3], sample[4:6], loss_fn, optimizer)
        epoch_loss += loss
        print(
            f"training ... batch {batch_num+1:03d} : "
            f"train loss {epoch_loss/(batch_num+1):.3f} ",
            end='\r')
定义预测函数
def predict(model, dataset, loss_fn, optimizer):
    """Run inference over `dataset` and accumulate predictions plus metadata.

    Returns (pred_start, pred_end, text, selected_text, sentiment, offset)
    as numpy arrays / decoded string lists. `loss_fn` and `optimizer` are
    unused but kept for signature symmetry with `train`.
    """

    @tf.function
    def predict_step(model, inputs):
        return model(inputs)

    def to_numpy(*args):
        # Convert tensors to numpy; decode byte-string tensors to str lists.
        out = []
        for arg in args:
            if arg.dtype == tf.string:
                arg = [s.decode('utf-8') for s in arg.numpy()]
                out.append(arg)
            else:
                arg = arg.numpy()
                out.append(arg)
        return out

    # Empty accumulators, extended batch by batch below.
    offset = tf.zeros([0, 128, 2], dtype=tf.dtypes.int32)
    text = tf.zeros([0, ], dtype=tf.dtypes.string)
    selected_text = tf.zeros([0, ], dtype=tf.dtypes.string)
    sentiment = tf.zeros([0, ], dtype=tf.dtypes.string)
    pred_start = tf.zeros([0, 128], dtype=tf.dtypes.float32)
    pred_end = tf.zeros([0, 128], dtype=tf.dtypes.float32)

    for batch_num, sample in enumerate(dataset):
        print(f"predicting ... batch {batch_num+1:03d}" + " " * 20, end='\r')
        y_pred = predict_step(model, sample[:3])

        # Append this batch to the accumulators.
        pred_start = tf.concat((pred_start, y_pred[0]), axis=0)
        pred_end = tf.concat((pred_end, y_pred[1]), axis=0)
        offset = tf.concat((offset, sample[3]), axis=0)
        text = tf.concat((text, sample[6]), axis=0)
        selected_text = tf.concat((selected_text, sample[7]), axis=0)
        sentiment = tf.concat((sentiment, sample[8]), axis=0)

    pred_start, pred_end, text, selected_text, sentiment, offset = \
        to_numpy(pred_start, pred_end, text, selected_text, sentiment, offset)
    return pred_start, pred_end, text, selected_text, sentiment, offset
判断函数
这个竞赛采用单词级Jaccard系数,计算公式如下。
Jaccard系数计算的是你预测的单词在数据集中的个数,
def jaccard(str1, str2):
    """Word-level Jaccard similarity between two strings (case-insensitive).

    |A ∩ B| / |A ∪ B| over the sets of lowercased whitespace-split words.
    """
    a = set(str1.lower().split())
    b = set(str2.lower().split())
    c = a.intersection(b)
    return float(len(c)) / (len(a) + len(b) - len(c))
定义预测结果解码函数
解码函数通过模型预测拿到的start和end的index位置信息,然后和之前拿到的词向量在样本句子中的位置进行比较,将这个区间内的所有的单词都提取出来作为我们的预测结果。
def decode_prediction(pred_start, pred_end, text, offset, sentiment):
    """Decode start/end logits into predicted selected-text strings.

    NOTE(review): the pasted source was garbled in this function (lost
    subscripts and lines around the argmax/neutral handling); this body is
    a reconstruction of the standard decoding — confirm against the
    original notebook. `sentiment` is accepted for interface compatibility.
    """

    def decode(pred_start, pred_end, text, offset):
        # Stitch together the characters of tokens pred_start..pred_end,
        # re-inserting a space where the char spans are non-adjacent.
        decoded_text = ""
        for i in range(pred_start, pred_end + 1):
            decoded_text += text[offset[i][0]:offset[i][1]]
            if (i + 1) < len(offset) and offset[i][1] < offset[i + 1][0]:
                decoded_text += " "
        return decoded_text

    decoded_predictions = []
    for i in range(len(text)):
        # Most likely start/end token positions.
        idx_start = np.argmax(pred_start[i])
        idx_end = np.argmax(pred_end[i])
        if idx_start > idx_end:
            # Degenerate prediction: collapse to a single token.
            idx_end = idx_start
        decoded_text = str(decode(idx_start, idx_end, text[i], offset[i]))
        if len(decoded_text) == 0:
            # Fall back to the whole tweet when nothing was decoded.
            decoded_text = text[i]
        decoded_predictions.append(decoded_text)

    return decoded_predictions
开始训练
将训练数据分成5个folds,每个folds训练5个epoch,使用adam优化器,learning rate设置成3e-5,batch size使用32。
# 5-fold cross-validation, 5 epochs per fold, Adam @ 3e-5, batch size 32.
num_folds = 5
num_epochs = 5
batch_size = 32
learning_rate = 3e-5
optimizer = tf.keras.optimizers.Adam(learning_rate)
# Dynamic loss scaling for mixed-precision training.
optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
    optimizer, 'dynamic')
config = BertConfig(output_hidden_states=True, num_labels=2)
model = BertModel.from_pretrained(PATH, config=config)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
kfold = model_selection.KFold(
    n_splits=num_folds, shuffle=True, random_state=42)

# Accumulators for the fold-averaged test predictions.
test_preds_start = np.zeros((len(test_df), 128), dtype=np.float32)
test_preds_end = np.zeros((len(test_df), 128), dtype=np.float32)

for fold_num, (train_idx, valid_idx) in enumerate(kfold.split(train_df.text)):
    print("\nfold %02d" % (fold_num + 1))

    # Build train, valid and test datasets for this fold.
    train_dataset = TweetDataset.create(
        train_df.iloc[train_idx], batch_size, shuffle_buffer_size=2048)
    valid_dataset = TweetDataset.create(
        train_df.iloc[valid_idx], batch_size, shuffle_buffer_size=-1)
    test_dataset = TweetDataset.create(
        test_df, batch_size, shuffle_buffer_size=-1)

    best_score = float('-inf')
    for epoch_num in range(num_epochs):
        print("\nepoch %03d" % (epoch_num + 1))

        train(model, train_dataset, loss_fn, optimizer)

        pred_start, pred_end, text, selected_text, sentiment, offset = \
            predict(model, valid_dataset, loss_fn, optimizer)

        selected_text_pred = decode_prediction(
            pred_start, pred_end, text, offset, sentiment)
        # BUG FIX: the original passed the whole lists to jaccard()
        # (which would raise on list.lower()); score each example.
        jaccards = []
        for i in range(len(selected_text)):
            jaccards.append(
                jaccard(selected_text[i], selected_text_pred[i]))
        score = np.mean(jaccards)
        print(f"valid jaccard epoch {epoch_num+1:03d}: {score}" + " " * 15)
        if score > best_score:
            best_score = score

            # Predict the test set whenever validation improves.
            test_pred_start, test_pred_end, test_text, _, test_sentiment, test_offset = \
                predict(model, test_dataset, loss_fn, optimizer)

    # Average the folds' best test predictions (1/num_folds weight each).
    test_preds_start += test_pred_start * 0.2
    test_preds_end += test_pred_end * 0.2

    # Rebuild the model between folds to avoid running out of memory.
    session = tf.compat.v1.get_default_session()
    graph = tf.compat.v1.get_default_graph()
    del session, graph, model
    model = BertModel.from_pretrained(PATH, config=config)
预测测试数据,并生成提交文件
# Decode the fold-averaged test predictions and write the submission file.
selected_text_pred = decode_prediction(
    test_preds_start, test_preds_end, test_text, test_offset, test_sentiment)


def f(selected):
    # Post-processing: lowercase and de-duplicate words.
    # NOTE(review): set() does not preserve word order — intentional here?
    return " ".join(set(selected.lower().split()))


submission_df.loc[:, 'selected_text'] = selected_text_pred
submission_df['selected_text'] = submission_df['selected_text'].map(f)
submission_df.to_csv("submission.csv", index=False)
这个方案在提交的时候在553个队伍中排名153位, 分数为0.68。
成为第一个吐槽的人