-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathutils.py
120 lines (96 loc) · 4.1 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
import pickle
import torch
import torchvision
from typing import Tuple
from torch.utils.data import DataLoader
import math
from torch import optim
def load_state_dict(model, fname):
    """
    Set parameters converted from Caffe models authors of VGGFace2 provide.
    See https://www.robots.ox.ac.uk/~vgg/data/vgg_face2/.

    Arguments:
        model: model whose matching state_dict entries are overwritten in place.
        fname: file name of parameters converted from a Caffe model, assuming
            the file format is Pickle (a dict mapping parameter names to
            numpy arrays).
    """
    # NOTE(security): pickle.load executes arbitrary code during
    # deserialization — only use with trusted checkpoint files.
    with open(fname, 'rb') as f:
        weights = pickle.load(f, encoding='latin1')
    own_state = model.state_dict()
    for name, param in weights.items():
        if name in own_state:
            try:
                own_state[name].copy_(torch.from_numpy(param))
            except Exception as e:
                # BUG FIX: `param` is a numpy array, so the original
                # `param.size()` called an int attribute and raised
                # TypeError, hiding the real failure. Use `param.shape`
                # and chain the original exception.
                raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose '\
                                   'dimensions in the checkpoint are {}.'.format(name, own_state[name].size(), param.shape)) from e
        # Checkpoint keys absent from the model are silently skipped
        # (strict behavior left disabled on purpose, see below).
        #else:
        #    raise KeyError('unexpected key "{}" in state_dict'.format(name))
def get_mean_and_std(
    dataset: "torchvision.datasets.ImageFolder",
    batch_size: int = 512,
    num_workers: int = 1,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Compute the per-channel mean and std of an image dataset.

    Uses the identity Var[X] = E[X^2] - E[X]^2 so the dataset is traversed
    only once. Works with any dataset yielding (image, label) pairs where
    image is a (3, H, W) float tensor.

    Args:
        dataset: the dataset.
        batch_size: images per batch (generalized from the previous
            hard-coded 512).
        num_workers: loader worker processes (generalized from the previous
            hard-coded 1).

    Returns:
        A (mean, std) pair of shape-(3,) tensors computed over every pixel
        of the dataset. (The previous ``Tuple[float, float]`` annotation was
        wrong: two tensors are returned, not two floats.)
    """
    dataset_loader = DataLoader(
        dataset, batch_size=batch_size, shuffle=False, pin_memory=True,
        num_workers=num_workers
    )
    channel_sum = torch.zeros(3)
    channel_sq_sum = torch.zeros(3)
    pixel_count = 0
    for images, _ in dataset_loader:
        b, _, h, w = images.shape
        channel_sum += torch.sum(images, dim=[0, 2, 3])
        channel_sq_sum += torch.sum(images ** 2, dim=[0, 2, 3])
        pixel_count += b * h * w
    mean = channel_sum / pixel_count
    # clamp_min guards against a tiny negative variance from float
    # round-off, which would make sqrt return NaN.
    var = (channel_sq_sum / pixel_count - mean ** 2).clamp_min(0.0)
    std = torch.sqrt(var)
    return mean, std
def adjust_learning_rate(args, optimizer, loader, step):
    """Set per-group learning rates: linear warmup, then cosine decay.

    Linear warmup for the first warmup epochs, then cosine decay from the
    base LR down to ``base_lr * 0.001``, where ``base_lr`` scales linearly
    with the batch size (``batch_size / 256``).

    Args:
        args: namespace providing ``epochs``, ``batch_size``,
            ``learning_rate_weights``, ``learning_rate_biases`` and,
            optionally, ``warmup_epochs`` (defaults to 10 for backward
            compatibility with the previously hard-coded value).
        optimizer: optimizer whose ``param_groups[0]`` holds the weights and
            ``param_groups[1]`` holds the biases.
        loader: training loader; ``len(loader)`` gives steps per epoch.
        step: global training step index.
    """
    max_steps = args.epochs * len(loader)
    # Generalized: warmup length is configurable; was hard-coded to 10 epochs.
    warmup_steps = getattr(args, 'warmup_epochs', 10) * len(loader)
    base_lr = args.batch_size / 256
    if step < warmup_steps:
        lr = base_lr * step / warmup_steps
    else:
        step -= warmup_steps
        max_steps -= warmup_steps
        q = 0.5 * (1 + math.cos(math.pi * step / max_steps))
        end_lr = base_lr * 0.001
        lr = base_lr * q + end_lr * (1 - q)
    optimizer.param_groups[0]['lr'] = lr * args.learning_rate_weights
    optimizer.param_groups[1]['lr'] = lr * args.learning_rate_biases
class LARS(optim.Optimizer):
    """Layer-wise Adaptive Rate Scaling (LARS) optimizer.

    SGD with momentum where each parameter's update is rescaled by a
    per-layer trust ratio ``eta * ||p|| / ||update||``.
    """

    def __init__(self, params, lr, weight_decay=0, momentum=0.9, eta=0.001,
                 weight_decay_filter=False, lars_adaptation_filter=False):
        """
        Args:
            params: iterable of parameters or param-group dicts.
            lr: base learning rate.
            weight_decay: L2 penalty coefficient.
            momentum: momentum factor.
            eta: trust coefficient for the LARS adaptation ratio.
            weight_decay_filter: if True, skip weight decay for 1-D params
                (biases / normalization weights).
            lars_adaptation_filter: if True, skip LARS adaptation for 1-D
                params.
        """
        defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum,
                        eta=eta, weight_decay_filter=weight_decay_filter,
                        lars_adaptation_filter=lars_adaptation_filter)
        super().__init__(params, defaults)

    def exclude_bias_and_norm(self, p):
        # Biases and normalization scales are 1-D tensors.
        return p.ndim == 1

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss. Added for compatibility with the standard
                ``torch.optim.Optimizer.step`` interface; the original
                signature took no arguments, so existing callers are
                unaffected.

        Returns:
            The loss from ``closure`` if one was supplied, else None.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for g in self.param_groups:
            for p in g['params']:
                dp = p.grad
                if dp is None:
                    continue
                # Optionally apply weight decay (skipped for 1-D params
                # when the filter is enabled).
                if not g['weight_decay_filter'] or not self.exclude_bias_and_norm(p):
                    dp = dp.add(p, alpha=g['weight_decay'])
                # LARS adaptation: scale update by eta*||p||/||dp||, with
                # guards against zero norms (factor falls back to 1).
                if not g['lars_adaptation_filter'] or not self.exclude_bias_and_norm(p):
                    param_norm = torch.norm(p)
                    update_norm = torch.norm(dp)
                    one = torch.ones_like(param_norm)
                    q = torch.where(param_norm > 0.,
                                    torch.where(update_norm > 0,
                                                (g['eta'] * param_norm / update_norm), one), one)
                    dp = dp.mul(q)
                # Momentum buffer, lazily created per parameter.
                param_state = self.state[p]
                if 'mu' not in param_state:
                    param_state['mu'] = torch.zeros_like(p)
                mu = param_state['mu']
                mu.mul_(g['momentum']).add_(dp)
                p.add_(mu, alpha=-g['lr'])
        return loss