# dataset_train_val.py
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))

import torch
import torch.nn as nn
import numpy as np
from sklearn.model_selection import train_test_split

from loss_utils import *  # provides DiceBCELoss
from segment_metrics import IOU_eval


class Dataset_train_val(nn.Module):
    def __init__(self, val_batch_size, use_gpu, power=1, swap_coeffs=False):
        super(Dataset_train_val, self).__init__()
        self.val_batch_size = val_batch_size
        self.use_gpu = use_gpu
        # hybrid_loss below reads these two attributes, but the original file
        # never initialised them; the defaults here are assumptions.
        self.power = power
        self.swap_coeffs = swap_coeffs

    def load_train_val_test(self, x_train_name, y_train_name, x_test_name, split_ratio=0.2):
        # Load pre-serialised tensors, e.g. 'x_train.pt', 'y_train.pt', 'x_test.pt'.
        x_train = torch.load(x_train_name)
        y_train = torch.load(y_train_name)
        x_test = torch.load(x_test_name)
        x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=split_ratio)
        return x_train, x_val, y_train, y_val, x_test
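
    # Example call (on an instance `ds`), using the file names noted above:
    #   x_tr, x_va, y_tr, y_va, x_te = ds.load_train_val_test(
    #       'x_train.pt', 'y_train.pt', 'x_test.pt', split_ratio=0.2)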

    # NB: this overrides nn.Module.eval(); that is safe here only because the
    # class is used as a plain helper and is never switched between modes itself.
    def eval(self, metrics, y_true, y_pred, thresh=0.5):
        output = {}
        # Binarise the targets; the float cast (an added fix) is needed because
        # BCE rejects boolean targets.
        y_true = (y_true > thresh).float()
        y_true = y_true.detach().cpu()
        y_pred = y_pred.detach().cpu()
        for metric in metrics or []:  # tolerate the metrics=None default used by callers
            if metric == 'DICE':
                output['DICE'] = DiceBCELoss().dice_loss(y_true, y_pred).item()
            elif metric == 'BCE':
                output['BCE'] = DiceBCELoss().bce_loss(y_true, y_pred).item()
            elif metric == 'IoU':
                output['IoU'] = IOU_eval().iou_evaluate_better(y_true.int(), y_pred).item()
        return output
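
    # The dict returned by eval() maps metric name to a Python float, e.g.
    # (illustrative values only): {'DICE': 0.31, 'BCE': 0.69, 'IoU': 0.52}.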

    def train_step(self, inputs, labels, optimizer, criterion, unet, metrics=None):
        unet.train()
        optimizer.zero_grad()
        outputs = unet(inputs)
        # NCHW -> NHWC so the channel/class axis comes last.
        outputs = outputs.permute(0, 2, 3, 1)
        labels = labels.permute(0, 2, 3, 1)
        loss = criterion(outputs, labels)  # e.g. F.cross_entropy
        evals = self.eval(metrics, labels, outputs)
        evals['loss'] = loss.item()
        loss.backward()
        optimizer.step()
        return evals

    def get_val_metrics(self, x_val, y_val, criterion, unet, metrics):
        unet.eval()  # put batch-norm and dropout layers into inference mode
        batch_size = self.val_batch_size
        epoch_iter = np.ceil(x_val.shape[0] / batch_size).astype(int)
        total_loss = 0.0
        total_evals = dict(('val_' + metric, 0) for metric in metrics)
        if self.use_gpu:
            x_val = x_val.cuda()
            y_val = y_val.cuda()
        with torch.no_grad():
            for i in range(epoch_iter):
                batch_val_x = x_val[i * batch_size : (i + 1) * batch_size]
                batch_val_y = y_val[i * batch_size : (i + 1) * batch_size]
                outputs = unet(batch_val_x)
                # NCHW -> NHWC: outputs.shape = (batch_size, img_cols, img_rows, n_classes)
                outputs = outputs.permute(0, 2, 3, 1)
                batch_val_y = batch_val_y.permute(0, 2, 3, 1)
                evals = self.eval(metrics, batch_val_y.float(), outputs.float())
                loss = criterion(outputs.float(), batch_val_y.float())  # e.g. F.cross_entropy
                total_loss += loss.item()  # .item() replaces the legacy .data accumulation
                for key in evals:
                    total_evals['val_' + key] += evals[key]
        for key in total_evals:
            total_evals[key] = total_evals[key] / epoch_iter
        total_evals['val_loss'] = total_loss / epoch_iter
        return total_evals

    def hybrid_loss(self, loss_1, loss_2):
        # Weight each loss by sin((pi/2) * min/loss)^power, then normalise both
        # coefficients by their common sum so they add up to 1. (The original
        # divided coeffs_2 by the already-normalised coeffs_1, which breaks the
        # normalisation; using a shared denominator is the presumed intent.)
        min_vals = torch.minimum(loss_1, loss_2)
        coeffs_1 = torch.sin((np.pi / 2) * min_vals / loss_1) ** self.power
        coeffs_2 = torch.sin((np.pi / 2) * min_vals / loss_2) ** self.power
        denom = coeffs_1 + coeffs_2
        coeffs_1 = coeffs_1 / denom
        coeffs_2 = coeffs_2 / denom
        if self.swap_coeffs:
            coeffs_1, coeffs_2 = coeffs_2, coeffs_1
        # Detach the weights so gradients flow only through the raw losses.
        return (coeffs_1.detach() * loss_1 + coeffs_2.detach() * loss_2).mean()
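
    # Worked example (power=1, no swap): for loss_1 = 1.0, loss_2 = 2.0,
    # min_vals = 1.0, so coeffs_1 = sin(pi/2) = 1.0 and
    # coeffs_2 = sin(pi/4) ~= 0.7071. After normalisation coeffs_1 ~= 0.586
    # and coeffs_2 ~= 0.414: the smaller loss receives the larger weight.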

    def train_step_hybrid(self, inputs, labels, optimizer, criterion, unet, hybrid_loss, metrics=None):
        unet.train()
        optimizer.zero_grad()
        # The hybrid model returns two prediction heads.
        outputs, outputs2 = unet(inputs)
        outputs = outputs.permute(0, 2, 3, 1)
        outputs2 = outputs2.permute(0, 2, 3, 1)
        labels = labels.permute(0, 2, 3, 1)
        outputs_avg = 0.5 * (outputs + outputs2)
        loss1 = criterion(outputs.float(), labels.float())  # e.g. F.cross_entropy
        loss2 = criterion(outputs2.float(), labels.float())
        loss = hybrid_loss(loss1, loss2)
        evals = self.eval(metrics, labels, outputs_avg)
        evals['loss'] = loss.item()
        loss.backward()
        optimizer.step()
        return evals

    def get_val_metrics_hybrid(self, x_val, y_val, criterion, unet, metrics=None):
        unet.eval()  # put batch-norm and dropout layers into inference mode
        batch_size = self.val_batch_size
        epoch_iter = np.ceil(x_val.shape[0] / batch_size).astype(int)
        total_loss = 0.0
        # Pre-seed the accumulator; the original started from an empty dict,
        # which raised a KeyError on the first `+=` below.
        total_evals = dict(('val_' + metric, 0) for metric in metrics or [])
        with torch.no_grad():
            for i in range(epoch_iter):
                batch_val_x = x_val[i * batch_size : (i + 1) * batch_size]
                batch_val_y = y_val[i * batch_size : (i + 1) * batch_size]
                if self.use_gpu:
                    batch_val_x = batch_val_x.cuda()
                    batch_val_y = batch_val_y.cuda()
                outputs, outputs2 = unet(batch_val_x)
                # NCHW -> NHWC: outputs.shape = (batch_size, img_cols, img_rows, n_classes)
                outputs = outputs.permute(0, 2, 3, 1)
                outputs2 = outputs2.permute(0, 2, 3, 1)
                batch_val_y = batch_val_y.permute(0, 2, 3, 1)
                loss1 = criterion(outputs.float(), batch_val_y.float())  # e.g. F.cross_entropy
                loss2 = criterion(outputs2.float(), batch_val_y.float())
                total_loss += self.hybrid_loss(loss1, loss2).item()
                outputs_avg = 0.5 * (outputs + outputs2)
                evals = self.eval(metrics, batch_val_y.float(), outputs_avg.float())
                for key in evals:
                    total_evals['val_' + key] += evals[key]
        for key in total_evals:
            total_evals[key] = total_evals[key] / epoch_iter
        total_evals['val_loss'] = total_loss / epoch_iter
        return total_evals
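

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): drives one epoch of
# training plus validation with the helpers above. `UNet` is a hypothetical
# single-output segmentation model; any nn.Module mapping (N, C, H, W) inputs
# to (N, n_classes, H, W) logits would fit. The BCEWithLogitsLoss criterion is
# likewise an assumption, since the original always receives `criterion` from
# the caller.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from unet import UNet  # hypothetical import

    use_gpu = torch.cuda.is_available()
    ds = Dataset_train_val(val_batch_size=8, use_gpu=use_gpu)
    x_train, x_val, y_train, y_val, x_test = ds.load_train_val_test(
        'x_train.pt', 'y_train.pt', 'x_test.pt', split_ratio=0.2)

    unet = UNet()
    if use_gpu:
        unet = unet.cuda()
    optimizer = torch.optim.Adam(unet.parameters(), lr=1e-3)
    criterion = nn.BCEWithLogitsLoss()
    metrics = ['DICE', 'BCE', 'IoU']

    batch_size = 8
    for i in range(int(np.ceil(x_train.shape[0] / batch_size))):
        xb = x_train[i * batch_size : (i + 1) * batch_size]
        yb = y_train[i * batch_size : (i + 1) * batch_size]
        if use_gpu:
            xb, yb = xb.cuda(), yb.cuda()
        train_evals = ds.train_step(xb, yb, optimizer, criterion, unet, metrics)
        print('batch', i, train_evals)

    val_evals = ds.get_val_metrics(x_val, y_val, criterion, unet, metrics)
    print(val_evals)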