utils.py
import numpy as np
import torch
from collections import Counter, OrderedDict


class OrderedCounter(Counter, OrderedDict):
    """Counter that remembers the order elements are first encountered."""

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))

    def __reduce__(self):
        return self.__class__, (OrderedDict(self),)
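
# A minimal usage sketch for OrderedCounter (the tokens below are illustrative,
# not taken from this repo). Iteration and repr follow first-encounter order,
# which made vocabulary construction deterministic on Python < 3.7, where plain
# dicts did not guarantee insertion order:
#
#     counts = OrderedCounter()
#     for token in ["the", "cat", "the", "mat"]:
#         counts[token] += 1
#     repr(counts)
#     # -> "OrderedCounter(OrderedDict([('the', 2), ('cat', 1), ('mat', 1)]))"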


def to_var(x):
    """Move a tensor to the GPU when CUDA is available."""
    if torch.cuda.is_available():
        x = x.cuda()
    return x


def idx2word(idx, i2w, pad_idx):
    """Convert a batch of index tensors into whitespace-joined sentences.

    Decoding of each sentence stops at its first pad token.
    """
    sent_str = [str()] * len(idx)
    for i, sent in enumerate(idx):
        for word_id in sent:
            if word_id == pad_idx:
                break
            sent_str[i] += i2w[str(word_id.item())] + " "
        sent_str[i] = sent_str[i].strip()
    return sent_str
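
# Hedged usage sketch for idx2word. It assumes i2w maps the *string* form of
# each index to a word (as with a JSON-loaded vocabulary) and that idx holds
# tensors, since word_id.item() is called. The toy vocabulary is made up:
#
#     i2w = {'0': '<pad>', '1': 'hello', '2': 'world'}
#     batch = torch.tensor([[1, 2, 0], [2, 0, 0]])
#     idx2word(batch, i2w=i2w, pad_idx=0)
#     # -> ['hello world', 'world']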


def interpolate(start, end, steps):
    """Linearly interpolate between two vectors, endpoints included.

    Returns an array of shape (steps + 2, start.shape[0]), one point per row.
    """
    interpolation = np.zeros((start.shape[0], steps + 2))
    for dim, (s, e) in enumerate(zip(start, end)):
        interpolation[dim] = np.linspace(s, e, steps + 2)
    return interpolation.T
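
# Hedged sketch of interpolate(): it walks a straight line between two latent
# vectors, returning steps + 2 rows (both endpoints included). The sizes are
# illustrative:
#
#     z1, z2 = np.zeros(16), np.ones(16)
#     path = interpolate(start=z1, end=z2, steps=8)
#     path.shape
#     # -> (10, 16)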


def expierment_name(args, ts):
    """Encode the run's hyperparameters and the timestamp ts into one name string."""
    exp_name = str()
    exp_name += "BS=%i_" % args.batch_size
    exp_name += "LR={}_".format(args.learning_rate)
    exp_name += "EB=%i_" % args.embedding_size
    exp_name += "%s_" % args.rnn_type.upper()
    exp_name += "HS=%i_" % args.hidden_size
    exp_name += "L=%i_" % args.num_layers
    exp_name += "BI=%i_" % args.bidirectional
    exp_name += "LS=%i_" % args.latent_size
    exp_name += "WD={}_".format(args.word_dropout)
    exp_name += "ANN=%s_" % args.anneal_function.upper()
    exp_name += "K={}_".format(args.k)
    exp_name += "X0=%i_" % args.x0
    exp_name += "TS=%s" % ts
    return exp_name
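
# Hedged sketch of expierment_name (sic; the spelling is kept as-is). Any
# argparse Namespace with the expected attributes works; the values here are
# invented for illustration:
#
#     from argparse import Namespace
#     args = Namespace(batch_size=32, learning_rate=0.001, embedding_size=300,
#                      rnn_type='gru', hidden_size=256, num_layers=1,
#                      bidirectional=False, latent_size=16, word_dropout=0.5,
#                      anneal_function='logistic', k=0.0025, x0=2500)
#     expierment_name(args, ts='2020-Jan-01')
#     # -> one string: 'BS=32_LR=0.001_EB=300_GRU_HS=256_L=1_BI=0_LS=16_'
#     #                'WD=0.5_ANN=LOGISTIC_K=0.0025_X0=2500_TS=2020-Jan-01'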