使用预训练的模型BERT来完成对整个竞赛的数据分析
lf0517
发表于 2023-1-14 22:04:15
978
0
0
导入需要的库
from math import ceil, floor

import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.layers as L
from sklearn import model_selection
from tensorflow.keras.initializers import TruncatedNormal
from tokenizers import BertWordPieceTokenizer
from transformers import BertConfig, TFBertPreTrainedModel, TFBertMainLayer
读取并解释数据
在竞赛中,对数据的理解是非常关键的。因此我们首先要做的就是读取数据,然后查看数据的内容以及特点。
先用pandas来读取csv数据:
# Load the competition data. Training rows with missing values cannot be
# used for span extraction, so they are dropped.
train_df = pd.read_csv('train.csv')
train_df.dropna(inplace=True)
test_df = pd.read_csv('test.csv')
# The test set has no labels; mirror `text` into `selected_text` so both
# frames share the same schema and can flow through the same pipeline.
test_df.loc[:, "selected_text"] = test_df.text.values
submission_df = pd.read_csv('sample_submission.csv')
再查看下我们的数据的数量,我们一共有27485条训练数据,3535条测试数据,
# Report dataset sizes (about 27.5k training rows, 3.5k test rows).
print("train numbers =", train_df.shape)
print("test numbers =", test_df.shape)
紧接着查看训练数据和测试数据前10条表单的字段跟数据,表单中包含了以下几个数据字段:

textID: 文本数据记录的唯一ID;
text: 原始语句;
selected_text: 表示情感的语句;
sentiment: 情感类型, neutral中立, positive积极, negative消极;

从数据中我们可以得出,目标就是根据现有的情感从原本的语句中选出能代表这个情感的语句部分。
# Peek at the first 10 rows of each frame to inspect the fields
# (textID, text, selected_text, sentiment).
train_df.head(10)
test_df.head(10)
定义常量
# Directory holding the pretrained bert-base-uncased weights and vocab file.
PATH = "./bert-base-uncased/"
# Fixed token-sequence length fed to BERT; shorter inputs are padded.
MAX_SEQUENCE_LENGTH = 128
载入词向量
BERT是依据一个固定的词表来进行训练的。因此在竞赛中需要先使用BertWordPieceTokenizer来加载这个词表,其中的lowercase=True表示所有的词都转为小写。设置大小写不敏感可以减少模型对资源的占用。
TOKENIZER = BertWordPieceTokenizer(f"{PATH}/vocab.txt", lowercase=True)
定义数据加载器
定义数据预处理函数
def preprocess(tweet, selected_text, sentiment):
    """Convert one (tweet, selected_text, sentiment) example into BERT inputs.

    Returns a 9-tuple matching TweetDataset's output signature:
    (input_ids, attention_mask, input_type_ids, offsets,
     target_start, target_end, tweet, selected_text, sentiment).
    """
    # tf.data hands strings over as byte strings; decode back to utf-8 text.
    tweet = tweet.decode('utf-8')
    selected_text = selected_text.decode('utf-8')
    sentiment = sentiment.decode('utf-8')

    # Normalize all whitespace runs to single spaces.
    tweet = " ".join(str(tweet).split())
    selected_text = " ".join(str(selected_text).split())

    # Locate selected_text inside tweet (character-level span).
    idx_start, idx_end = None, None
    for index in (i for i, c in enumerate(tweet) if c == selected_text[0]):
        if tweet[index:index + len(selected_text)] == selected_text:
            idx_start = index
            idx_end = index + len(selected_text)
            break

    # Character mask: 1 where the character belongs to selected_text.
    intersection = [0] * len(tweet)
    if idx_start is not None and idx_end is not None:
        for char_idx in range(idx_start, idx_end):
            intersection[char_idx] = 1

    # Encode the tweet; `offsets` gives each token's (start, end) char span.
    enc = TOKENIZER.encode(tweet)
    input_ids_orig, offsets = enc.ids, enc.offsets

    # Tokens whose char span overlaps the selected span form the answer.
    target_idx = []
    for i, (o1, o2) in enumerate(offsets):
        if sum(intersection[o1:o2]) > 0:
            target_idx.append(i)
    target_start = target_idx[0]
    target_end = target_idx[-1]

    # Vocab ids of the three sentiment words in bert-base-uncased.
    sentiment_map = {
        'positive': 3893,
        'negative': 4997,
        'neutral': 8699,
    }

    # Build "[CLS] sentiment [SEP] tweet [SEP]" (101 = [CLS], 102 = [SEP]).
    input_ids = [101] + [sentiment_map[sentiment]] + [102] + input_ids_orig + [102]
    input_type_ids = [0] * (len(input_ids_orig) + 4)
    attention_mask = [1] * (len(input_ids_orig) + 4)
    offsets = [(0, 0), (0, 0), (0, 0)] + offsets + [(0, 0)]
    # The tweet tokens were shifted right by the 3 prefix tokens.
    target_start += 3
    target_end += 3

    # Pad (or truncate) everything to the fixed MAX_SEQUENCE_LENGTH.
    padding_length = MAX_SEQUENCE_LENGTH - len(input_ids)
    if padding_length > 0:
        input_ids = input_ids + ([0] * padding_length)
        attention_mask = attention_mask + ([0] * padding_length)
        input_type_ids = input_type_ids + ([0] * padding_length)
        offsets = offsets + ([(0, 0)] * padding_length)
    elif padding_length < 0:
        # NOTE(review): the original source was truncated here by the scrape;
        # plain tail truncation (dropping the final [SEP]) is the usual
        # fallback for this kernel — confirm against the original notebook.
        input_ids = input_ids[:MAX_SEQUENCE_LENGTH]
        attention_mask = attention_mask[:MAX_SEQUENCE_LENGTH]
        input_type_ids = input_type_ids[:MAX_SEQUENCE_LENGTH]
        offsets = offsets[:MAX_SEQUENCE_LENGTH]

    return (
        input_ids, attention_mask, input_type_ids, offsets,
        target_start, target_end, tweet, selected_text, sentiment,
    )
定义数据加载器
class TweetDataset(tf.data.Dataset):
    """tf.data pipeline yielding preprocessed (inputs, targets, raw strings)."""

    # Must match the 9-tuple returned by preprocess().
    outputTypes = (
        tf.dtypes.int32, tf.dtypes.int32, tf.dtypes.int32,
        tf.dtypes.int32, tf.dtypes.float32, tf.dtypes.float32,
        tf.dtypes.string, tf.dtypes.string, tf.dtypes.string,
    )

    outputShapes = (
        (128,), (128,), (128,),
        (128, 2), (), (),
        (), (), (),
    )

    def _generator(tweet, selected_text, sentiment):
        # Yield one preprocessed example per input row.
        for tw, st, se in zip(tweet, selected_text, sentiment):
            yield preprocess(tw, st, se)

    def __new__(cls, tweet, selected_text, sentiment):
        return tf.data.Dataset.from_generator(
            cls._generator,
            output_types=cls.outputTypes,
            output_shapes=cls.outputShapes,
            args=(tweet, selected_text, sentiment)
        )

    @staticmethod
    def create(dataframe, batch_size, shuffle_buffer_size=-1):
        """Build a cached, batched, prefetched dataset from a dataframe.

        shuffle_buffer_size=-1 disables shuffling (valid/test sets).
        """
        dataset = TweetDataset(
            dataframe.text.values,
            dataframe.selected_text.values,
            dataframe.sentiment.values,
        )
        dataset = dataset.cache()
        if shuffle_buffer_size != -1:
            dataset = dataset.shuffle(shuffle_buffer_size)
        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
        return dataset
定义模型
我们使用BERT模型来进行这次竞赛,这里对BERT模型做一些简单的介绍。
BERT的全称是Bidirectional Encoder Representation from Transformers,即双向Transformer的Encoder,因为decoder是不能获取要预测的信息的。
模型的主要创新点都在pre-train方法上,即用了Masked LM和Next Sentence Prediction两种方法分别捕捉词语和句子级别的representation。
BERT主要特点如下:
使用了Transformer作为算法的主要框架,Transformer能更彻底地捕捉语句中的双向关系;
使用了Masked Language Model 和 Next Sentence Prediction的多任务训练目标;
使用更强大的机器训练更大规模的数据,Google开源了BERT模型,我们可以直接使用BERT作为Word2Vec的转换矩阵并高效地将其应用到自己的任务中。

BERT的本质是在海量的语料基础上,运行自监督学习方法让单词学习得到一个较好的特征表示。
在之后特定任务中,可以直接使用BERT的特征表示作为该任务的词嵌入特征。所以BERT提供的是一个供其它任务迁移学习的模型,该模型可以根据任务微调或者固定之后作为特征提取器。
在竞赛中,我们定义了一个BertModel类,里面使用TFBertPreTrainedModel来进行推理。
BERT的输出我们保存在hidden_states中,然后将这个得到的hidden_states结果再加入到Dense Layer,最后输出我们需要提取的表示情感的文字的起始位置跟结束位置。
这两个位置信息就是我们需要从原文中提取的词向量的位置。
class BertModel(TFBertPreTrainedModel):
    """BERT with a QA-style head predicting start/end of the sentiment span."""

    # Dropout rate on the concatenated hidden states (overfitting control).
    dr = 0.1
    # Number of final hidden-state layers to concatenate.
    hs = 2

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.bert = TFBertMainLayer(config, name="bert")
        self.concat = L.Concatenate()
        self.dropout = L.Dropout(self.dr)
        # Dense head: config.num_labels == 2 -> one logit each for start/end.
        self.qa_outputs = L.Dense(
            config.num_labels,
            kernel_initializer=TruncatedNormal(stddev=config.initializer_range),
            dtype='float32',
            name="qa_outputs")

    @tf.function
    def call(self, inputs, **kwargs):
        # Requires config.output_hidden_states=True; use the last `hs` layers.
        _, _, hidden_states = self.bert(inputs, **kwargs)
        hidden_states = self.concat([
            hidden_states[-i] for i in range(1, self.hs + 1)
        ])
        hidden_states = self.dropout(hidden_states, training=kwargs.get("training", False))
        logits = self.qa_outputs(hidden_states)
        # Split the 2 logits per token into separate start/end distributions.
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)
        return start_logits, end_logits
定义训练函数
def train(model, dataset, loss_fn, optimizer):
    """Run one training epoch over `dataset`.

    `optimizer` must be a LossScaleOptimizer (mixed precision): the loss is
    scaled before the backward pass and the gradients unscaled afterwards.
    """

    @tf.function
    def train_step(model, inputs, y_true, loss_fn, optimizer):
        with tf.GradientTape() as tape:
            y_pred = model(inputs, training=True)
            # Total loss = start-position loss + end-position loss.
            loss = loss_fn(y_true[0], y_pred[0])
            loss += loss_fn(y_true[1], y_pred[1])
            scaled_loss = optimizer.get_scaled_loss(loss)

        scaled_gradients = tape.gradient(scaled_loss, model.trainable_variables)
        gradients = optimizer.get_unscaled_gradients(scaled_gradients)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        return loss, y_pred

    epoch_loss = 0.
    for batch_num, sample in enumerate(dataset):
        # sample[:3] = (input_ids, attention_mask, type_ids);
        # sample[4:6] = (target_start, target_end).
        loss, y_pred = train_step(model, sample[:3], sample[4:6], loss_fn, optimizer)
        epoch_loss += loss
        print(
            f"training ... batch {batch_num+1:03d} : "
            f"train loss {epoch_loss/(batch_num+1):.3f} ",
            end='\r')
定义预测函数
def predict(model, dataset, loss_fn, optimizer):
    """Run inference over `dataset` and accumulate results as numpy arrays.

    Returns (pred_start, pred_end, text, selected_text, sentiment, offset).
    `loss_fn`/`optimizer` are unused; kept for a signature symmetric to train().
    """

    @tf.function
    def predict_step(model, inputs):
        return model(inputs)

    def to_numpy(*args):
        # Convert tensors to numpy; decode byte-string tensors to str lists.
        out = []
        for arg in args:
            if arg.dtype == tf.string:
                arg = [s.decode('utf-8') for s in arg.numpy()]
                out.append(arg)
            else:
                arg = arg.numpy()
                out.append(arg)
        return out

    # Empty accumulators, extended batch by batch below.
    offset = tf.zeros([0, 128, 2], dtype=tf.dtypes.int32)
    text = tf.zeros([0,], dtype=tf.dtypes.string)
    selected_text = tf.zeros([0,], dtype=tf.dtypes.string)
    sentiment = tf.zeros([0,], dtype=tf.dtypes.string)
    pred_start = tf.zeros([0, 128], dtype=tf.dtypes.float32)
    pred_end = tf.zeros([0, 128], dtype=tf.dtypes.float32)

    for batch_num, sample in enumerate(dataset):
        print(f"predicting ... batch {batch_num+1:03d}"+" "*20, end='\r')
        y_pred = predict_step(model, sample[:3])
        # Append this batch to the accumulators.
        pred_start = tf.concat((pred_start, y_pred[0]), axis=0)
        pred_end = tf.concat((pred_end, y_pred[1]), axis=0)
        offset = tf.concat((offset, sample[3]), axis=0)
        text = tf.concat((text, sample[6]), axis=0)
        selected_text = tf.concat((selected_text, sample[7]), axis=0)
        sentiment = tf.concat((sentiment, sample[8]), axis=0)

    pred_start, pred_end, text, selected_text, sentiment, offset = \
        to_numpy(pred_start, pred_end, text, selected_text, sentiment, offset)

    return pred_start, pred_end, text, selected_text, sentiment, offset
判断函数
这个竞赛采用单词级的Jaccard系数作为评价指标,计算公式如下:
$$J(A, B) = \frac{|A \cap B|}{|A \cup B|} = \frac{|A \cap B|}{|A| + |B| - |A \cap B|}$$
Jaccard系数衡量的是预测出的单词集合与标准答案单词集合的重合程度。
def jaccard(str1, str2):
    """Word-level Jaccard similarity between two strings.

    Lowercases and splits both strings into word sets, then returns
    |A ∩ B| / |A ∪ B|. Raises ZeroDivisionError if both are empty.
    """
    a = set(str1.lower().split())
    b = set(str2.lower().split())
    c = a.intersection(b)
    return float(len(c)) / (len(a) + len(b) - len(c))
定义预测结果解码函数
解码函数通过模型预测拿到的start和end的index位置信息,然后和之前拿到的词向量在样本句子中的位置进行比较,将这个区间内的所有的单词都提取出来作为我们的预测结果。
def decode_prediction(pred_start, pred_end, text, offset, sentiment):
    """Turn start/end logit arrays back into extracted text spans.

    pred_start, pred_end: (num_samples, seq_len) logit arrays.
    text, sentiment: sequences of the original strings.
    offset: (num_samples, seq_len, 2) token -> char spans from the tokenizer.
    Returns a list with one predicted selected_text per sample.
    """

    def decode(pred_start, pred_end, text, offset):
        # Stitch together the characters covered by tokens [pred_start, pred_end].
        decoded_text = ""
        for i in range(pred_start, pred_end + 1):
            decoded_text += text[offset[i][0]:offset[i][1]]
            # Re-insert the space the tokenizer dropped between adjacent tokens.
            if (i + 1) < len(offset) and offset[i][1] < offset[i + 1][0]:
                decoded_text += " "
        return decoded_text

    decoded_predictions = []
    for i in range(len(text)):
        if sentiment[i] == "neutral" or len(text[i].split()) < 2:
            # Neutral or very short tweets: the whole text is the best answer.
            decoded_text = text[i]
        else:
            idx_start = np.argmax(pred_start[i])
            idx_end = np.argmax(pred_end[i])
            # Guard against an inverted span: collapse it to the start token.
            if idx_start > idx_end:
                idx_end = idx_start
            decoded_text = str(decode(idx_start, idx_end, text[i], offset[i]))
            if len(decoded_text) == 0:
                decoded_text = text[i]
        decoded_predictions.append(decoded_text)
    return decoded_predictions
开始训练
将训练数据分成5个folds,每个fold训练5个epoch,使用adam优化器,learning rate设置成3e-5,batch size使用32。
# Hyperparameters (see the description above: 5 folds, 5 epochs, lr 3e-5).
num_folds = 5
num_epochs = 5
batch_size = 32
learning_rate = 3e-5

# Adam wrapped in a dynamic loss-scale optimizer for mixed-precision training.
optimizer = tf.keras.optimizers.Adam(learning_rate)
optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
    optimizer, 'dynamic')

# output_hidden_states=True is required by BertModel.call; num_labels=2
# gives one logit each for the start and end positions.
config = BertConfig(output_hidden_states=True, num_labels=2)
model = BertModel.from_pretrained(PATH, config=config)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

kfold = model_selection.KFold(
    n_splits=num_folds, shuffle=True, random_state=42)

# Accumulators for the fold-averaged test predictions.
test_preds_start = np.zeros((len(test_df), 128), dtype=np.float32)
test_preds_end = np.zeros((len(test_df), 128), dtype=np.float32)

for fold_num, (train_idx, valid_idx) in enumerate(kfold.split(train_df.text)):
    print("\nfold %02d" % (fold_num + 1))

    # Build train, valid and test datasets for this fold.
    train_dataset = TweetDataset.create(
        train_df.iloc[train_idx], batch_size, shuffle_buffer_size=2048)
    valid_dataset = TweetDataset.create(
        train_df.iloc[valid_idx], batch_size, shuffle_buffer_size=-1)
    test_dataset = TweetDataset.create(
        test_df, batch_size, shuffle_buffer_size=-1)

    best_score = float('-inf')
    for epoch_num in range(num_epochs):
        print("\nepoch %03d" % (epoch_num + 1))

        train(model, train_dataset, loss_fn, optimizer)

        # Evaluate on the validation split with the Jaccard metric.
        pred_start, pred_end, text, selected_text, sentiment, offset = \
            predict(model, valid_dataset, loss_fn, optimizer)
        selected_text_pred = decode_prediction(
            pred_start, pred_end, text, offset, sentiment)
        jaccards = []
        for i in range(len(selected_text)):
            # Compare per-sample strings (the original passed whole arrays).
            jaccards.append(
                jaccard(selected_text[i], selected_text_pred[i]))

        score = np.mean(jaccards)
        print(f"valid jaccard epoch {epoch_num+1:03d}: {score}" + " " * 15)

        if score > best_score:
            best_score = score

            # Predict the test set with the current best weights.
            # NOTE(review): indentation was lost in the scraped source;
            # keeping this inside the improvement branch so the fold
            # contributes its best epoch's predictions — confirm intent.
            test_pred_start, test_pred_end, test_text, _, test_sentiment, test_offset = \
                predict(model, test_dataset, loss_fn, optimizer)

    # Each fold contributes 1/num_folds of the ensemble (0.2 == 1/5).
    test_preds_start += test_pred_start * 0.2
    test_preds_end += test_pred_end * 0.2

    # Rebuild the model between folds to avoid running out of memory.
    session = tf.compat.v1.get_default_session()
    graph = tf.compat.v1.get_default_graph()
    del session, graph, model
    model = BertModel.from_pretrained(PATH, config=config)
预测测试数据,并生成提交文件
# Decode the fold-averaged test predictions into text spans.
selected_text_pred = decode_prediction(
    test_preds_start, test_preds_end, test_text, test_offset, test_sentiment)

def f(selected):
    # Deduplicate words via a set. NOTE(review): set() discards word order;
    # the word-level Jaccard metric is order-insensitive so the score is
    # unaffected, but the submitted text is scrambled — confirm intended.
    return " ".join(set(selected.lower().split()))

submission_df.loc[:, 'selected_text'] = selected_text_pred
submission_df['selected_text'] = submission_df['selected_text'].map(f)
submission_df.to_csv("submission.csv", index=False)
这个方案在提交的时候在553个队伍中排名153位, 分数为0.68。
成为第一个吐槽的人