train_sparse_vs_hybrid.py
import math

from torch.utils.data import DataLoader
from sentence_transformers import InputExample
from sentence_transformers.cross_encoder import CrossEncoder

# Load the training pairs: each TSV line holds <qid, query text, doc text, label>.
train_set = []
with open('train_labels_sparse_vs_dense_T50.tsv', 'r') as labels:
    for line in labels:
        qid, qtext, doctext, label = line.rstrip().split('\t')
        train_set.append(InputExample(texts=[qtext, doctext], label=int(label)))
print(len(train_set))

batch_size = 16
train_dataloader = DataLoader(train_set, shuffle=True, batch_size=batch_size)

epoch_num = 1
warmup_steps = math.ceil(len(train_dataloader) * epoch_num * 0.1)  # 10% of training steps for warm-up

model_name = 'bert-base-uncased'
model = CrossEncoder(model_name, num_labels=1)
model_name = 'sparse_vs_hybrid_' + model_name + '_e' + str(epoch_num) + '_b' + str(batch_size)

# Train the cross-encoder and write checkpoints under models/.
model.fit(train_dataloader=train_dataloader,
          epochs=epoch_num,
          warmup_steps=warmup_steps,
          output_path='models/' + model_name)
model.save('models/' + model_name)
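
# A minimal inference sketch: reload the saved cross-encoder and score
# query-document pairs with predict(). The query and passage strings below
# are hypothetical placeholders, not data from the training set.
scorer = CrossEncoder('models/' + model_name)
example_pairs = [
    ['what is hybrid retrieval', 'Hybrid retrieval combines sparse and dense rankers.'],
    ['what is hybrid retrieval', 'An unrelated passage about a different topic.'],
]
scores = scorer.predict(example_pairs)  # one relevance score per (query, passage) pair
print(scores)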