Using the pretrained BERT model for the competition's data analysis
Posted by lf0517 on 2023-01-14 22:04:15
Import the required libraries
import numpy as np
import pandas as pd
from math import ceil, floor
import tensorflow as tf
import tensorflow.keras.layers as L
from tensorflow.keras.initializers import TruncatedNormal
from sklearn import model_selection
from transformers import BertConfig, TFBertPreTrainedModel, TFBertMainLayer
from tokenizers import BertWordPieceTokenizer
Read and inspect the data
In a competition, understanding the data is crucial, so the first step is to load it and look at what it contains and how it is structured.
First, read the CSV files with pandas:
train_df = pd.read_csv('train.csv')
train_df.dropna(inplace=True)
test_df = pd.read_csv('test.csv')
test_df.loc[:, "selected_text"] = test_df.text.values
submission_df = pd.read_csv('sample_submission.csv')
Then check the size of the data: there are 27,485 training samples and 3,535 test samples.
print("train numbers =", train_df.shape)5 \- C9 U, \( B% {' J
print("test numbers =", test_df.shape)+ d. o* W( X# x* q8 m$ f9 ]+ T
Next, look at the first 10 rows of the training and test data. Each record has the following fields:
textID: unique ID of the text record;
text: the original tweet;
selected_text: the phrase within the tweet that expresses the sentiment;
sentiment: the sentiment label, one of neutral, positive or negative.
From this we can see that the goal is to pick out, from the original tweet, the part of the text that supports the given sentiment.
train_df.head(10)
test_df.head(10)
Define constants
# Directory holding the pretrained BERT weights and vocabulary
PATH = "./bert-base-uncased/"
# Maximum sequence length
MAX_SEQUENCE_LENGTH = 128
Load the tokenizer vocabulary
BERT is trained against a fixed WordPiece vocabulary, so we first load that vocabulary with BertWordPieceTokenizer. lowercase=True lowercases all input; being case-insensitive keeps the vocabulary smaller and reduces the model's resource usage.
TOKENIZER = BertWordPieceTokenizer(f"{PATH}/vocab.txt", lowercase=True)
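As a quick sanity check (a minimal sketch, assuming vocab.txt exists under PATH and TOKENIZER was created as above), encode() returns both the token ids and the character offsets that the preprocessing below relies on:

enc = TOKENIZER.encode("I am so happy today")
print(enc.tokens)   # the WordPiece tokens
print(enc.ids)      # their ids in the vocabulary
print(enc.offsets)  # (start, end) character positions of each token in the input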
Define the data pipeline
Define the data preprocessing function
def preprocess(tweet, selected_text, sentiment):
    # The raw strings arrive as byte strings; decode them back to utf-8
    tweet = tweet.decode('utf-8')
    selected_text = selected_text.decode('utf-8')
    sentiment = sentiment.decode('utf-8')

    tweet = " ".join(str(tweet).split())
    selected_text = " ".join(str(selected_text).split())

    # Mark the characters that selected_text and text have in common
    idx_start, idx_end = None, None
    for index in (i for i, c in enumerate(tweet) if c == selected_text[0]):
        if tweet[index:index+len(selected_text)] == selected_text:
            idx_start = index
            idx_end = index + len(selected_text)
            break

    intersection = [0] * len(tweet)
    if idx_start is not None and idx_end is not None:
        for char_idx in range(idx_start, idx_end):
            intersection[char_idx] = 1

    # Encode the tweet with the WordPiece vocabulary; this returns the token ids
    # plus the (start, end) character position of each token within the tweet
    enc = TOKENIZER.encode(tweet)
    input_ids_orig, offsets = enc.ids, enc.offsets

    target_idx = []
    for i, (o1, o2) in enumerate(offsets):
        if sum(intersection[o1: o2]) > 0:
            target_idx.append(i)

    target_start = target_idx[0]
    target_end = target_idx[-1]

    sentiment_map = {
        'positive': 3893,
        'negative': 4997,
        'neutral': 8699,
    }

    # Combine the sentiment token with the encoded tweet to build the model input:
    # [CLS] sentiment [SEP] tweet tokens [SEP]
    input_ids = [101] + [sentiment_map[sentiment]] + [102] + input_ids_orig + [102]
    input_type_ids = [0] * (len(input_ids_orig) + 4)
    attention_mask = [1] * (len(input_ids_orig) + 4)
    offsets = [(0, 0), (0, 0), (0, 0)] + offsets + [(0, 0)]
    target_start += 3
    target_end += 3

    # BERT expects fixed-length inputs, so pad short sequences up to MAX_SEQUENCE_LENGTH
    padding_length = MAX_SEQUENCE_LENGTH - len(input_ids)
    if padding_length > 0:
        input_ids = input_ids + ([0] * padding_length)
        attention_mask = attention_mask + ([0] * padding_length)
        input_type_ids = input_type_ids + ([0] * padding_length)
        offsets = offsets + ([(0, 0)] * padding_length)
    elif padding_length < 0:
        # The original post is cut off here; a reasonable reconstruction is to
        # clip everything down to MAX_SEQUENCE_LENGTH
        input_ids = input_ids[:MAX_SEQUENCE_LENGTH]
        attention_mask = attention_mask[:MAX_SEQUENCE_LENGTH]
        input_type_ids = input_type_ids[:MAX_SEQUENCE_LENGTH]
        offsets = offsets[:MAX_SEQUENCE_LENGTH]
        target_start = min(target_start, MAX_SEQUENCE_LENGTH - 1)
        target_end = min(target_end, MAX_SEQUENCE_LENGTH - 1)

    return (
        input_ids, attention_mask, input_type_ids, offsets,
        target_start, target_end, tweet, selected_text, sentiment,
    )
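The three ids in sentiment_map are simply the WordPiece ids of the words "positive", "negative" and "neutral" in the bert-base-uncased vocabulary, and 101/102 are the [CLS]/[SEP] ids. They can be verified against the tokenizer (a quick sketch, assuming the TOKENIZER loaded above):

for word in ("positive", "negative", "neutral"):
    # the printed ids may also include [CLS]/[SEP] (101/102), depending on the tokenizer's defaults
    print(word, TOKENIZER.encode(word).ids)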
Define the data loader
class TweetDataset(tf.data.Dataset):
    # dtypes of the 9 outputs produced by preprocess()
    outputTypes = (
        tf.dtypes.int32, tf.dtypes.int32, tf.dtypes.int32,
        tf.dtypes.int32, tf.dtypes.float32, tf.dtypes.float32,
        tf.dtypes.string, tf.dtypes.string, tf.dtypes.string,
    )

    # shapes of the 9 outputs: ids/mask/type-ids, offsets, start/end targets, raw strings
    outputShapes = (
        (128,), (128,), (128,),
        (128, 2), (), (),
        (), (), (),
    )

    def _generator(tweet, selected_text, sentiment):
        for tw, st, se in zip(tweet, selected_text, sentiment):
            yield preprocess(tw, st, se)

    def __new__(cls, tweet, selected_text, sentiment):
        return tf.data.Dataset.from_generator(
            cls._generator,
            output_types=cls.outputTypes,
            output_shapes=cls.outputShapes,
            args=(tweet, selected_text, sentiment)
        )

    @staticmethod
    def create(dataframe, batch_size, shuffle_buffer_size=-1):
        dataset = TweetDataset(
            dataframe.text.values,
            dataframe.selected_text.values,
            dataframe.sentiment.values
        )
        dataset = dataset.cache()
        if shuffle_buffer_size != -1:
            dataset = dataset.shuffle(shuffle_buffer_size)
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
        return dataset
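Before training it can be useful to smoke-test the pipeline end to end (a quick sketch, assuming train_df from the loading step above): build a small dataset and inspect one batch.

smoke_ds = TweetDataset.create(train_df.head(64), batch_size=8)
for batch in smoke_ds.take(1):
    input_ids, attention_mask, input_type_ids = batch[0], batch[1], batch[2]
    print(input_ids.shape, attention_mask.shape, input_type_ids.shape)  # (8, 128) each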
Define the model
We use BERT for this competition; here is a brief introduction to the model.
BERT stands for Bidirectional Encoder Representations from Transformers, i.e. the encoder of a bidirectional Transformer (only the encoder is used, because a decoder cannot see the information it is asked to predict).
The main innovation lies in the pre-training objectives, Masked LM and Next Sentence Prediction, which capture word-level and sentence-level representations respectively.
BERT's main characteristics are:
it uses the Transformer as its backbone, which captures the bidirectional relations within a sentence more thoroughly;
it is trained with the multi-task objectives Masked Language Model and Next Sentence Prediction;
it was trained on large-scale data with powerful hardware, and Google has open-sourced the weights, so we can use BERT directly as a word-embedding model and transfer it efficiently to our own task.
In essence, BERT runs self-supervised learning over a massive corpus so that each token acquires a good feature representation.
For a downstream task, BERT's representations can then be used as that task's word embeddings: it is a model intended for transfer learning, and can either be fine-tuned on the task or frozen and used as a feature extractor.
For this competition we define a BertModel class that builds on TFBertPreTrainedModel.
We take BERT's hidden_states output, concatenate the last layers, pass the result through a Dense layer, and output the start and end positions of the text span that expresses the sentiment.
These two positions tell us which tokens to extract from the original tweet.
class BertModel(TFBertPreTrainedModel):
    # Dropout rate, to reduce overfitting
    dr = 0.1
    # Number of hidden-state layers to concatenate
    hs = 2

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.bert = TFBertMainLayer(config, name="bert")
        self.concat = L.Concatenate()
        self.dropout = L.Dropout(self.dr)
        self.qa_outputs = L.Dense(
            config.num_labels,
            kernel_initializer=TruncatedNormal(stddev=config.initializer_range),
            dtype='float32',
            name="qa_outputs")

    @tf.function
    def call(self, inputs, **kwargs):
        _, _, hidden_states = self.bert(inputs, **kwargs)

        # Concatenate the last `hs` hidden-state layers
        hidden_states = self.concat([
            hidden_states[-i] for i in range(1, self.hs+1)
        ])

        hidden_states = self.dropout(hidden_states, training=kwargs.get("training", False))
        logits = self.qa_outputs(hidden_states)
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)

        return start_logits, end_logits
Define the training function
def train(model, dataset, loss_fn, optimizer):

    @tf.function
    def train_step(model, inputs, y_true, loss_fn, optimizer):
        with tf.GradientTape() as tape:
            y_pred = model(inputs, training=True)
            loss = loss_fn(y_true[0], y_pred[0])
            loss += loss_fn(y_true[1], y_pred[1])
            # Mixed-precision training: scale the loss, then unscale the gradients
            scaled_loss = optimizer.get_scaled_loss(loss)

        scaled_gradients = tape.gradient(scaled_loss, model.trainable_variables)
        gradients = optimizer.get_unscaled_gradients(scaled_gradients)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))

        return loss, y_pred

    epoch_loss = 0.
    for batch_num, sample in enumerate(dataset):
        loss, y_pred = train_step(model, sample[:3], sample[4:6], loss_fn, optimizer)
        epoch_loss += loss
        print(
            f"training ... batch {batch_num+1:03d} : "
            f"train loss {epoch_loss/(batch_num+1):.3f} ",
            end='\r')
Define the prediction function
def predict(model, dataset, loss_fn, optimizer):

    @tf.function
    def predict_step(model, inputs):
        return model(inputs)

    def to_numpy(*args):
        out = []
        for arg in args:
            if arg.dtype == tf.string:
                arg = [s.decode('utf-8') for s in arg.numpy()]
                out.append(arg)
            else:
                arg = arg.numpy()
                out.append(arg)
        return out

    offset = tf.zeros([0, 128, 2], dtype=tf.dtypes.int32)
    text = tf.zeros([0,], dtype=tf.dtypes.string)
    selected_text = tf.zeros([0,], dtype=tf.dtypes.string)
    sentiment = tf.zeros([0,], dtype=tf.dtypes.string)
    pred_start = tf.zeros([0, 128], dtype=tf.dtypes.float32)
    pred_end = tf.zeros([0, 128], dtype=tf.dtypes.float32)

    for batch_num, sample in enumerate(dataset):
        print(f"predicting ... batch {batch_num+1:03d}"+" "*20, end='\r')

        y_pred = predict_step(model, sample[:3])

        # add batch to accumulators
        pred_start = tf.concat((pred_start, y_pred[0]), axis=0)
        pred_end = tf.concat((pred_end, y_pred[1]), axis=0)
        offset = tf.concat((offset, sample[3]), axis=0)
        text = tf.concat((text, sample[6]), axis=0)
        selected_text = tf.concat((selected_text, sample[7]), axis=0)
        sentiment = tf.concat((sentiment, sample[8]), axis=0)

    pred_start, pred_end, text, selected_text, sentiment, offset = \
        to_numpy(pred_start, pred_end, text, selected_text, sentiment, offset)

    return pred_start, pred_end, text, selected_text, sentiment, offset
Define the scoring function
The competition is scored with a word-level Jaccard score. Split the prediction and the ground-truth selected_text into lower-cased word sets A and B; the score is
jaccard(A, B) = |A ∩ B| / |A ∪ B| = |A ∩ B| / (|A| + |B| - |A ∩ B|)
i.e. the number of words shared by the prediction and the ground truth, divided by the total number of distinct words in either.
def jaccard(str1, str2):
    a = set(str1.lower().split())
    b = set(str2.lower().split())
    c = a.intersection(b)
    return float(len(c)) / (len(a) + len(b) - len(c))
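For example, checking the metric on two made-up strings:

print(jaccard("I am very happy today", "very happy"))
# shared words = {very, happy}, so 2 / (5 + 2 - 2) = 0.4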
Define the prediction decoding function
The decoding function takes the start and end indices predicted by the model, compares them with the token offsets recorded earlier (the position of each token within the original tweet), and joins all words inside that range into the predicted selected_text.
def decode_prediction(pred_start, pred_end, text, offset, sentiment):

    def decode(pred_start, pred_end, text, offset):
        decoded_text = ""
        for i in range(pred_start, pred_end+1):
            decoded_text += text[offset[i][0]:offset[i][1]]
            if (i+1) < len(offset) and offset[i][1] < offset[i+1][0]:
                decoded_text += " "
        return decoded_text

    decoded_predictions = []
    for i in range(len(text)):
        # For neutral tweets (or very short ones) the whole text is the best guess
        if sentiment[i] == "neutral" or len(text[i].split()) < 2:
            decoded_text = text[i]
        else:
            idx_start = np.argmax(pred_start[i])
            idx_end = np.argmax(pred_end[i])
            if idx_start > idx_end:
                idx_end = idx_start
            decoded_text = str(decode(idx_start, idx_end, text[i], offset[i]))
            if len(decoded_text) == 0:
                decoded_text = text[i]
        decoded_predictions.append(decoded_text)

    return decoded_predictions
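To see how the decoder works, here is a toy call with hand-made logits and offsets (hypothetical values, only meant to illustrate how token indices map back to character spans):

toy_text = ["so happy today"]
# mimic the offset layout built in preprocess: three (0, 0) entries for [CLS], sentiment, [SEP]
toy_offset = [[(0, 0)] * 3 + [(0, 2), (3, 8), (9, 14)] + [(0, 0)] * 122]
toy_start = np.zeros((1, 128), dtype=np.float32)
toy_end = np.zeros((1, 128), dtype=np.float32)
toy_start[0, 4] = 5.0  # highest start logit at the token "happy"
toy_end[0, 5] = 5.0    # highest end logit at the token "today"
print(decode_prediction(toy_start, toy_end, toy_text, toy_offset, ["positive"]))
# ['happy today']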
Start training
Split the training data into 5 folds, train 5 epochs per fold, and use the Adam optimizer with a learning rate of 3e-5 and a batch size of 32.
num_folds = 5
num_epochs = 5
batch_size = 32
learning_rate = 3e-5

optimizer = tf.keras.optimizers.Adam(learning_rate)
optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
    optimizer, 'dynamic')

config = BertConfig(output_hidden_states=True, num_labels=2)
model = BertModel.from_pretrained(PATH, config=config)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

kfold = model_selection.KFold(
    n_splits=num_folds, shuffle=True, random_state=42)

test_preds_start = np.zeros((len(test_df), 128), dtype=np.float32)
test_preds_end = np.zeros((len(test_df), 128), dtype=np.float32)
for fold_num, (train_idx, valid_idx) in enumerate(kfold.split(train_df.text)):
    print("\nfold %02d" % (fold_num+1))

    # Build the train, valid and test datasets for this fold
    train_dataset = TweetDataset.create(
        train_df.iloc[train_idx], batch_size, shuffle_buffer_size=2048)
    valid_dataset = TweetDataset.create(
        train_df.iloc[valid_idx], batch_size, shuffle_buffer_size=-1)
    test_dataset = TweetDataset.create(
        test_df, batch_size, shuffle_buffer_size=-1)

    best_score = float('-inf')
    for epoch_num in range(num_epochs):
        print("\nepoch %03d" % (epoch_num+1))

        train(model, train_dataset, loss_fn, optimizer)

        pred_start, pred_end, text, selected_text, sentiment, offset = \
            predict(model, valid_dataset, loss_fn, optimizer)

        selected_text_pred = decode_prediction(
            pred_start, pred_end, text, offset, sentiment)

        jaccards = []
        for i in range(len(selected_text)):
            jaccards.append(
                jaccard(selected_text[i], selected_text_pred[i]))

        score = np.mean(jaccards)
        print(f"valid jaccard epoch {epoch_num+1:03d}: {score}"+" "*15)

        if score > best_score:
            best_score = score

            # predict the test set whenever the validation score improves,
            # so the fold's best epoch is the one that gets kept
            test_pred_start, test_pred_end, test_text, _, test_sentiment, test_offset = \
                predict(model, test_dataset, loss_fn, optimizer)

    # Each fold contributes 1/num_folds (= 0.2) to the averaged test logits
    test_preds_start += test_pred_start * 0.2
    test_preds_end += test_pred_end * 0.2

    # Rebuild the model between folds to avoid running out of memory
    session = tf.compat.v1.get_default_session()
    graph = tf.compat.v1.get_default_graph()
    del session, graph, model
    model = BertModel.from_pretrained(PATH, config=config)
Predict the test data and generate the submission file
selected_text_pred = decode_prediction(
    test_preds_start, test_preds_end, test_text, test_offset, test_sentiment)

def f(selected):
    return " ".join(set(selected.lower().split()))

submission_df.loc[:, 'selected_text'] = selected_text_pred
submission_df['selected_text'] = submission_df['selected_text'].map(f)
submission_df.to_csv("submission.csv", index=False)
At the time of submission, this solution ranked 153rd out of 553 teams with a score of 0.68.