model.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class SkipGramModel(nn.Module):
    """Skip-gram model of word2vec.

    Attributes:
        emb_size: Vocabulary size (number of embeddings).
        emb_dimension: Embedding dimension, typically from 50 to 500.
        u_embeddings: Embedding layer for center words.
        v_embeddings: Embedding layer for neighbor (context) words.
    """

    def __init__(self, emb_size, emb_dimension):
        """Initialize model parameters.

        Create the two embedding layers and initialize their weights.

        Args:
            emb_size: Vocabulary size (number of embeddings).
            emb_dimension: Embedding dimension, typically from 50 to 500.

        Returns:
            None
        """
        super(SkipGramModel, self).__init__()
        self.emb_size = emb_size
        self.emb_dimension = emb_dimension
        self.u_embeddings = nn.Embedding(emb_size, emb_dimension, sparse=True)
        self.v_embeddings = nn.Embedding(emb_size, emb_dimension, sparse=True)
        self.init_emb()

    def init_emb(self):
        """Initialize embedding weights like word2vec.

        The u_embedding weights are drawn uniformly from
        [-0.5/emb_dimension, 0.5/emb_dimension]; the v_embedding weights
        are set to zero.

        Returns:
            None
        """
        initrange = 0.5 / self.emb_dimension
        self.u_embeddings.weight.data.uniform_(-initrange, initrange)
        self.v_embeddings.weight.data.zero_()
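        # This mirrors the reference word2vec C implementation, which draws
        # the input vectors (syn0) uniformly from [-0.5/dim, 0.5/dim] and
        # zero-fills the output vectors used for negative sampling (syn1neg).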

    def forward(self, pos_u, pos_v, neg_v):
        """Forward pass.

        PyTorch expects batched input, so every argument is a batch of
        word ids.

        Args:
            pos_u: batch of center word ids for positive word pairs.
            pos_v: batch of neighbor word ids for positive word pairs.
            neg_v: batch of neighbor word ids for negative word pairs.

        Returns:
            The negative-sampling loss for this batch, a scalar tensor.
        """
        emb_u = self.u_embeddings(pos_u)
        emb_v = self.v_embeddings(pos_v)
        score = torch.mul(emb_u, emb_v).squeeze()
        score = torch.sum(score, dim=1)
        score = F.logsigmoid(score)
        neg_emb_v = self.v_embeddings(neg_v)
        neg_score = torch.bmm(neg_emb_v, emb_u.unsqueeze(2)).squeeze()
        neg_score = F.logsigmoid(-1 * neg_score)
        return -1 * (torch.sum(score) + torch.sum(neg_score))
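
    # The value returned by forward() is the standard negative-sampling
    # objective of word2vec (Mikolov et al., 2013):
    #
    #   L = - sum_{(u, v)  in positive pairs} log sigmoid(u . v)
    #       - sum_{(u, v') in negative pairs} log sigmoid(-(u . v'))
    #
    # Shape assumption (not stated in the original file): pos_u and pos_v are
    # LongTensors of shape (batch,), and neg_v has shape
    # (batch, num_negative), which is what the bmm/squeeze above expects.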

    def save_embedding(self, id2word, file_name, use_cuda):
        """Save all embeddings to a file.

        Since this class only records word ids, the map from id to word has
        to be supplied from outside.

        Args:
            id2word: map from word id to word.
            file_name: output file name.
            use_cuda: whether the model weights live on the GPU.

        Returns:
            None.
        """
        if use_cuda:
            embedding = self.u_embeddings.weight.cpu().data.numpy()
        else:
            embedding = self.u_embeddings.weight.data.numpy()
        # Plain-text word2vec format: a "vocab_size dimension" header line,
        # then one "word value value ..." line per word.
        with open(file_name, 'w') as fout:
            fout.write('%d %d\n' % (len(id2word), self.emb_dimension))
            for wid, w in id2word.items():
                e = embedding[wid]
                e = ' '.join(map(lambda x: str(x), e))
                fout.write('%s %s\n' % (w, e))
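

# Example usage (an illustrative sketch, not part of the original file): one
# training step on a random batch. SparseAdam is used because the embedding
# layers are created with sparse=True; the vocabulary size, embedding
# dimension, batch size, and number of negative samples below are arbitrary
# assumptions.
def example_training_step():
    model = SkipGramModel(emb_size=100, emb_dimension=50)
    optimizer = torch.optim.SparseAdam(model.parameters(), lr=1e-3)
    pos_u = torch.randint(0, 100, (16,))    # center word ids, shape (batch,)
    pos_v = torch.randint(0, 100, (16,))    # context word ids, shape (batch,)
    neg_v = torch.randint(0, 100, (16, 5))  # 5 negative samples per pair
    optimizer.zero_grad()
    loss = model.forward(pos_u, pos_v, neg_v)
    loss.backward()
    optimizer.step()
    return loss.item()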


def test():
    model = SkipGramModel(100, 100)
    id2word = dict()
    for i in range(100):
        id2word[i] = str(i)
    # The output path is arbitrary; any writable location works.
    model.save_embedding(id2word, 'test_embedding.txt', use_cuda=False)


if __name__ == '__main__':
    test()