-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathadaptive_wing_loss.py
48 lines (41 loc) · 1.55 KB
/
adaptive_wing_loss.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
# -*- coding: utf-8 -*-
# @Time : 2019/9/9
# @Author : Elliott Zheng
# @Email : [email protected]
import torch
from torch import nn
# torch.log and math.log is e based
class AdaptiveWingLoss(nn.Module):
    """Adaptive Wing Loss for heatmap regression (Wang et al., ICCV 2019).

    Piecewise loss on the element-wise error delta = |y - y_hat|:
      * |delta| <  theta: omega * ln(1 + (delta / epsilon)^(alpha - y))
      * |delta| >= theta: A * delta - C, where A and C are chosen so the two
        pieces join with matching value and slope at delta == theta.

    The exponent (alpha - y) adapts per pixel to the target value y, so the
    loss is more sensitive near foreground (y close to 1) heatmap pixels.

    Fixes vs. the original implementation:
      * The nonlinear branch divided delta by ``omega`` instead of
        ``epsilon``; since A and C are derived from (theta / epsilon), that
        made the loss discontinuous at the theta threshold. It now uses
        ``epsilon``, matching the paper.
      * Empty inputs no longer raise ZeroDivisionError.
    """

    def __init__(self, omega=14, theta=0.5, epsilon=1, alpha=2.1):
        """
        :param omega: overall scale of the nonlinear part.
        :param theta: error threshold switching nonlinear -> linear regime.
        :param epsilon: curvature control of the nonlinear part.
        :param alpha: base exponent; the effective exponent is alpha - y.
        """
        super(AdaptiveWingLoss, self).__init__()
        self.omega = omega
        self.theta = theta
        self.epsilon = epsilon
        self.alpha = alpha

    def forward(self, pred, target):
        """
        :param pred: predicted heatmaps, BxNxHxH
        :param target: ground-truth heatmaps, BxNxHxH (values expected in [0, 1])
        :return: scalar mean loss over all elements
        """
        y = target
        y_hat = pred
        delta_y = (y - y_hat).abs()
        # Guard: with no elements both branches are empty and the mean below
        # would divide by zero. Return a zero that still carries grad.
        if delta_y.numel() == 0:
            return (pred * 0.0).sum()
        small = delta_y < self.theta
        delta_y1 = delta_y[small]
        delta_y2 = delta_y[~small]
        y1 = y[small]
        y2 = y[~small]
        # Nonlinear (wing-like) regime. NOTE: divide by epsilon, not omega,
        # so this piece is consistent with the A/C derivation below.
        loss1 = self.omega * torch.log(1 + torch.pow(delta_y1 / self.epsilon, self.alpha - y1))
        # A is the slope of the nonlinear piece at delta == theta; C shifts the
        # linear piece so the two branches meet continuously at theta.
        A = self.omega * (1 / (1 + torch.pow(self.theta / self.epsilon, self.alpha - y2))) * (self.alpha - y2) * (
            torch.pow(self.theta / self.epsilon, self.alpha - y2 - 1)) * (1 / self.epsilon)
        C = self.theta * A - self.omega * torch.log(1 + torch.pow(self.theta / self.epsilon, self.alpha - y2))
        loss2 = A * delta_y2 - C
        # Mean over every element, regardless of which branch it fell into.
        return (loss1.sum() + loss2.sum()) / (len(loss1) + len(loss2))
if __name__ == "__main__":
    # Smoke test: loss on an all-ones target vs. an all-zeros prediction,
    # checking that gradients flow back through the loss.
    criterion = AdaptiveWingLoss()
    target = torch.ones(2, 68, 64, 64)
    prediction = torch.zeros(2, 68, 64, 64)
    prediction.requires_grad_(True)
    result = criterion(prediction, target)
    result.backward()
    print(result)