# -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 10:45:48 2019
@author: chxy
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import os
import time
import shutil
from tqdm import tqdm
from utils import accuracy, AverageMeter
from resnet import resnet32
from tensorboard_logger import configure, log_value

class Trainer(object):
    """
    Trainer encapsulates all the logic necessary for
    training a cohort of ResNet-32 models with
    Deep Mutual Learning (DML).
    All hyperparameters are provided by the user in the
    config file.
    """

    def __init__(self, config, data_loader):
"""
Construct a new Trainer instance.
Args
----
- config: object containing command line arguments.
- data_loader: data iterator
"""
self.config = config
# data params
if config.is_train:
self.train_loader = data_loader[0]
self.valid_loader = data_loader[1]
self.num_train = len(self.train_loader.dataset)
self.num_valid = len(self.valid_loader.dataset)
else:
self.test_loader = data_loader
self.num_test = len(self.test_loader.dataset)
self.num_classes = config.num_classes
# training params
self.epochs = config.epochs
self.start_epoch = 0
self.momentum = config.momentum
self.lr = config.init_lr
self.weight_decay = config.weight_decay
self.nesterov = config.nesterov
self.gamma = config.gamma
# misc params
self.use_gpu = config.use_gpu
self.best = config.best
self.ckpt_dir = config.ckpt_dir
self.logs_dir = config.logs_dir
self.counter = 0
self.lr_patience = config.lr_patience
self.train_patience = config.train_patience
self.use_tensorboard = config.use_tensorboard
self.resume = config.resume
self.print_freq = config.print_freq
self.model_name = config.save_name
self.model_num = config.model_num
self.models = []
self.optimizers = []
self.schedulers = []
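        # nn.KLDivLoss expects log-probabilities as its input and probabilities
        # as its target; reduction='batchmean' divides the summed divergence by
        # the batch size, matching the mathematical definition of KL.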
self.loss_kl = nn.KLDivLoss(reduction='batchmean')
self.loss_ce = nn.CrossEntropyLoss()
self.best_valid_accs = [0.] * self.model_num
# configure tensorboard logging
if self.use_tensorboard:
tensorboard_dir = self.logs_dir + self.model_name
print('[*] Saving tensorboard logs to {}'.format(tensorboard_dir))
if not os.path.exists(tensorboard_dir):
os.makedirs(tensorboard_dir)
configure(tensorboard_dir)
for i in range(self.model_num):
# build models
model = resnet32()
if self.use_gpu:
model.cuda()
self.models.append(model)
# initialize optimizer and scheduler
optimizer = optim.SGD(model.parameters(), lr=self.lr, momentum=self.momentum,
weight_decay=self.weight_decay, nesterov=self.nesterov)
self.optimizers.append(optimizer)
            # decay the learning rate by a factor of `gamma` every 60 epochs
            scheduler = optim.lr_scheduler.StepLR(self.optimizers[i], step_size=60, gamma=self.gamma)
self.schedulers.append(scheduler)
print('[*] Number of parameters of one model: {:,}'.format(
sum([p.data.nelement() for p in self.models[0].parameters()])))

    def train(self):
"""
Train the model on the training set.
A checkpoint of the model is saved after each epoch
and if the validation accuracy is improved upon,
a separate ckpt is created for use on the test set.
"""
        # load the most recent checkpoint of every model in the cohort
        if self.resume:
            for i in range(self.model_num):
                self.load_checkpoint(i, best=False)
print("\n[*] Train on {} samples, validate on {} samples".format(
self.num_train, self.num_valid)
)
for epoch in range(self.start_epoch, self.epochs):
print(
'\nEpoch: {}/{} - LR: {:.6f}'.format(
epoch+1, self.epochs, self.optimizers[0].param_groups[0]['lr'],)
)
# train for 1 epoch
train_losses, train_accs = self.train_one_epoch(epoch)
# evaluate on validation set
valid_losses, valid_accs = self.validate(epoch)
            for i in range(self.model_num):
                is_best = valid_accs[i].avg > self.best_valid_accs[i]
                msg1 = "model_{:d}: train loss: {:.3f} - train acc: {:.3f} "
                msg2 = "- val loss: {:.3f} - val acc: {:.3f}"
                if is_best:
                    msg2 += " [*]"
                msg = msg1 + msg2
                print(msg.format(i+1, train_losses[i].avg, train_accs[i].avg,
                                 valid_losses[i].avg, valid_accs[i].avg))
                # early stopping on `train_patience` is intentionally disabled here
                self.best_valid_accs[i] = max(valid_accs[i].avg, self.best_valid_accs[i])
                self.save_checkpoint(i,
                    {'epoch': epoch + 1,
                     'model_state': self.models[i].state_dict(),
                     'optim_state': self.optimizers[i].state_dict(),
                     'best_valid_acc': self.best_valid_accs[i],
                     }, is_best
                )

            # step the schedulers once per epoch, after the optimizer updates
            # (the call order required since PyTorch 1.1; this replaces the
            # deprecated scheduler.step(epoch) form)
            for scheduler in self.schedulers:
                scheduler.step()

    def train_one_epoch(self, epoch):
"""
Train the model for 1 epoch of the training set.
An epoch corresponds to one full pass through the entire
training set in successive mini-batches.
This is used by train() and should not be called manually.
"""
batch_time = AverageMeter()
losses = []
accs = []
for i in range(self.model_num):
self.models[i].train()
losses.append(AverageMeter())
accs.append(AverageMeter())
tic = time.time()
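        # Deep Mutual Learning objective (Zhang et al., 2018): each model i in
        # the cohort of K = self.model_num networks minimizes
        #     L_i = CE(p_i, y) + (1 / (K - 1)) * sum_{j != i} KL(p_j || p_i)
        # where p_i = softmax(logits_i); the loop below implements this per model.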
        with tqdm(total=self.num_train) as pbar:
            for batch_idx, (images, labels) in enumerate(self.train_loader):
                if self.use_gpu:
                    images, labels = images.cuda(), labels.cuda()

                # forward pass through every model in the cohort
                outputs = []
                for model in self.models:
                    outputs.append(model(images))
                for i in range(self.model_num):
                    ce_loss = self.loss_ce(outputs[i], labels)
                    kl_loss = 0
                    for j in range(self.model_num):
                        if i != j:
                            # detach the peer's logits so that model i's update
                            # does not backpropagate through model j
                            kl_loss += self.loss_kl(F.log_softmax(outputs[i], dim=1),
                                                    F.softmax(outputs[j].detach(), dim=1))
                    loss = ce_loss + kl_loss / (self.model_num - 1)

                    # measure accuracy and record loss
                    prec = accuracy(outputs[i].data, labels.data, topk=(1,))[0]
                    losses[i].update(loss.item(), images.size(0))
                    accs[i].update(prec.item(), images.size(0))

                    # compute gradients and update SGD
                    self.optimizers[i].zero_grad()
                    loss.backward()
                    self.optimizers[i].step()
# measure elapsed time
toc = time.time()
batch_time.update(toc-tic)
pbar.set_description(
(
"{:.1f}s - model1_loss: {:.3f} - model1_acc: {:.3f}".format(
(toc-tic), losses[0].avg, accs[0].avg
)
)
)
                self.batch_size = images.shape[0]
                pbar.update(self.batch_size)

                # log to tensorboard (batch_idx, not the shadowed model index,
                # indexes the global iteration)
                if self.use_tensorboard:
                    iteration = epoch * len(self.train_loader) + batch_idx
                    for i in range(self.model_num):
                        log_value('train_loss_%d' % (i+1), losses[i].avg, iteration)
                        log_value('train_acc_%d' % (i+1), accs[i].avg, iteration)
return losses, accs

    def validate(self, epoch):
        """
        Evaluate the models on the validation set.
        """
        losses = []
        accs = []
        for i in range(self.model_num):
            self.models[i].eval()
            losses.append(AverageMeter())
            accs.append(AverageMeter())

        # gradients are not needed for validation
        with torch.no_grad():
            for images, labels in self.valid_loader:
                if self.use_gpu:
                    images, labels = images.cuda(), labels.cuda()

                # forward pass through every model
                outputs = []
                for model in self.models:
                    outputs.append(model(images))
                for i in range(self.model_num):
                    # the training DML loss, computed here for monitoring only
                    ce_loss = self.loss_ce(outputs[i], labels)
                    kl_loss = 0
                    for j in range(self.model_num):
                        if i != j:
                            kl_loss += self.loss_kl(F.log_softmax(outputs[i], dim=1),
                                                    F.softmax(outputs[j], dim=1))
                    loss = ce_loss + kl_loss / (self.model_num - 1)

                    # measure accuracy and record loss
                    prec = accuracy(outputs[i].data, labels.data, topk=(1,))[0]
                    losses[i].update(loss.item(), images.size(0))
                    accs[i].update(prec.item(), images.size(0))
# log to tensorboard for every epoch
if self.use_tensorboard:
for i in range(self.model_num):
log_value('valid_loss_%d' % (i+1), losses[i].avg, epoch+1)
log_value('valid_acc_%d' % (i+1), accs[i].avg, epoch+1)
return losses, accs

    def test(self):
        """
        Test the models on the held-out test data.
        This function should only be called at the very
        end once the models have finished training.
        """
        for i in range(self.model_num):
            losses = AverageMeter()
            top1 = AverageMeter()
            top5 = AverageMeter()

            # load the best (or most recent) checkpoint of model i
            self.load_checkpoint(i, best=self.best)
            self.models[i].eval()

            with torch.no_grad():
                for images, labels in self.test_loader:
                    if self.use_gpu:
                        images, labels = images.cuda(), labels.cuda()

                    # forward pass
                    outputs = self.models[i](images)
                    loss = self.loss_ce(outputs, labels)

                    # measure accuracy and record loss
                    prec1, prec5 = accuracy(outputs.data, labels.data, topk=(1, 5))
                    losses.update(loss.item(), images.size(0))
                    top1.update(prec1.item(), images.size(0))
                    top5.update(prec5.item(), images.size(0))

            print(
                '[*] model_{:d} test loss: {:.3f}, top1_acc: {:.3f}%, top5_acc: {:.3f}%'.format(
                    i+1, losses.avg, top1.avg, top5.avg)
            )

    def save_checkpoint(self, i, state, is_best):
        """
        Save a copy of model i so that it can be loaded at a future
        date. This function is used when the model is being evaluated
        on the test data.
        If this model has reached the best validation accuracy thus
        far, a separate file with the suffix `best` is created.
        """
# print("[*] Saving model to {}".format(self.ckpt_dir))
filename = self.model_name + str(i+1) + '_ckpt.pth.tar'
ckpt_path = os.path.join(self.ckpt_dir, filename)
torch.save(state, ckpt_path)
if is_best:
filename = self.model_name + str(i+1) + '_model_best.pth.tar'
shutil.copyfile(
ckpt_path, os.path.join(self.ckpt_dir, filename)
)

    def load_checkpoint(self, i, best=False):
        """
        Load a checkpoint of model i. This is useful for 2 cases:
        - Resuming training with the most recent model checkpoint.
        - Loading the best validation model to evaluate on the test data.
        Params
        ------
        - i: index of the model in the cohort.
        - best: if set to True, loads the best model. Use this if you want
          to evaluate your model on the test data. Else, set to False in
          which case the most recent version of the checkpoint is used.
        """
        print("[*] Loading model from {}".format(self.ckpt_dir))
        filename = self.model_name + str(i+1) + '_ckpt.pth.tar'
        if best:
            filename = self.model_name + str(i+1) + '_model_best.pth.tar'
        ckpt_path = os.path.join(self.ckpt_dir, filename)
        ckpt = torch.load(ckpt_path)

        # load variables from checkpoint
        self.start_epoch = ckpt['epoch']
        self.best_valid_accs[i] = ckpt['best_valid_acc']
        self.models[i].load_state_dict(ckpt['model_state'])
        self.optimizers[i].load_state_dict(ckpt['optim_state'])

        if best:
            print(
                "[*] Loaded {} checkpoint @ epoch {} "
                "with best valid acc of {:.3f}".format(
                    filename, ckpt['epoch'], ckpt['best_valid_acc'])
            )
        else:
            print(
                "[*] Loaded {} checkpoint @ epoch {}".format(
                    filename, ckpt['epoch'])
            )
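
if __name__ == '__main__':
    # Minimal usage sketch (not part of the original repository): it assumes a
    # config namespace carrying the attributes read in __init__ above, plus the
    # repo's resnet32 and utils.accuracy; all values below are illustrative
    # placeholders, not the paper's hyperparameters.
    from types import SimpleNamespace
    from torch.utils.data import DataLoader, TensorDataset

    config = SimpleNamespace(
        is_train=True, num_classes=10, epochs=1, momentum=0.9, init_lr=0.1,
        weight_decay=5e-4, nesterov=True, gamma=0.1, use_gpu=False, best=True,
        ckpt_dir='./ckpt', logs_dir='./logs/', lr_patience=10,
        train_patience=50, use_tensorboard=False, resume=False, print_freq=10,
        save_name='dml', model_num=2)
    os.makedirs(config.ckpt_dir, exist_ok=True)

    # random stand-in data with CIFAR-10 shapes (3x32x32 images, 10 classes)
    images = torch.randn(64, 3, 32, 32)
    labels = torch.randint(0, config.num_classes, (64,))
    loader = DataLoader(TensorDataset(images, labels), batch_size=16)

    # train two ResNet-32 peers that mutually distill on the dummy data
    trainer = Trainer(config, (loader, loader))
    trainer.train()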