-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: loss.py
44 lines (32 loc) · 1.39 KB
/
loss.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
import torch
import torch.nn as nn
import torch.nn.functional as F
class BinaryCELoss(nn.Module):
    """Binary cross-entropy loss over one positive and several negative scores.

    The positive term is ``-log(sigmoid(pos_score))``. The negative term
    averages ``softplus(neg_score)`` over the negatives on the last
    dimension; by the identity ``softplus(x) == -log(1 - sigmoid(x))`` this
    is the standard BCE contribution of a negative sample.
    """

    def __init__(self):
        super().__init__()

    def forward(self, pos_score, neg_score, probs):
        """Compute the un-reduced loss.

        Args:
            pos_score: scores of positive samples; squeezed before use.
            neg_score: scores of negative samples; negatives are laid out
                along the last dimension.
            probs: unused here; accepted so all loss classes in this file
                share a single ``forward`` signature.

        Returns:
            Un-reduced loss tensor (the caller is expected to reduce it).
        """
        pos_term = -F.logsigmoid(pos_score.squeeze())
        # Average over the negatives along the last dimension. The original
        # used neg_score.size(2), which hard-codes a 3-D layout; size(-1) is
        # identical for 3-D inputs (dim 2 *is* the last dim) and also works
        # for other ranks, matching the dim=-1 used in the sum.
        num_negatives = neg_score.size(-1)
        neg_term = torch.sum(F.softplus(neg_score) / num_negatives, dim=-1)
        return pos_term + neg_term
class BPRLoss(nn.Module):
    """Bayesian Personalized Ranking loss.

    Supports exactly one negative sample per positive: the loss is
    ``-log(sigmoid(pos - neg))``, which pushes the positive score above
    the negative one.
    """

    def __init__(self):
        super().__init__()

    def forward(self, pos_score, neg_score, probs):
        # probs is unused; accepted only so every loss class in this file
        # shares the same forward signature.
        margin = pos_score.squeeze() - neg_score.squeeze()
        return -F.logsigmoid(margin)
class WeightedBinaryCELoss(nn.Module):
    """Binary CE loss with negatives weighted by a softmax over their scores.

    Higher-scoring (harder) negatives receive larger weight; ``temperature``
    controls how peaked that weighting is (lower = more peaked).
    """

    def __init__(self, temperature=1.0):
        super().__init__()
        self.temperature = temperature

    def forward(self, pos_score, neg_score, probs):
        # probs is unused; accepted only so every loss class in this file
        # shares the same forward signature.
        neg_weight = F.softmax(neg_score / self.temperature, -1)
        pos_term = -F.logsigmoid(pos_score.squeeze())
        neg_term = torch.sum(F.softplus(neg_score) * neg_weight, dim=-1)
        return pos_term + neg_term
class WeightedProbBinaryCELoss(nn.Module):
    """Binary CE loss with proposal-corrected softmax weights on negatives.

    Like WeightedBinaryCELoss, but the softmax logits are debiased by
    subtracting ``log(probs)`` — presumably the proposal probabilities under
    which the negatives were sampled (importance correction); confirm against
    the sampler that produces ``probs``.
    """

    def __init__(self, temperature=1.0):
        super().__init__()
        self.temperature = temperature

    def forward(self, pos_score, neg_score, probs):
        corrected_logits = neg_score / self.temperature - torch.log(probs)
        neg_weight = F.softmax(corrected_logits, -1)
        pos_term = -F.logsigmoid(pos_score.squeeze())
        neg_term = torch.sum(F.softplus(neg_score) * neg_weight, dim=-1)
        return pos_term + neg_term