-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathlab9-6_batchnorm.py
More file actions
129 lines (102 loc) · 4.87 KB
/
lab9-6_batchnorm.py
File metadata and controls
129 lines (102 loc) · 4.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
# Lab 10 MNIST and softmax
# Compares two otherwise-identical MLP classifiers on MNIST:
# one with BatchNorm1d after each hidden Linear layer, one without.
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import matplotlib.pylab as plt
# Run on GPU when available, otherwise CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# for reproducibility
#torch.manual_seed(1)
#if device == 'cuda':
# torch.cuda.manual_seed_all(1)
# parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 32
# MNIST dataset (28x28 grayscale digits; ToTensor scales pixels to [0, 1])
mnist_train = dsets.MNIST(root='MNIST_data/',train=True,transform=transforms.ToTensor(),download=True)
mnist_test = dsets.MNIST(root='MNIST_data/',train=False,transform=transforms.ToTensor(),download=True)
# dataset loader
# drop_last=True keeps every batch exactly batch_size, so per-batch metric
# means can be averaged by simply dividing by the number of batches.
train_loader = torch.utils.data.DataLoader(dataset=mnist_train,batch_size=batch_size,shuffle=True,drop_last=True)
test_loader = torch.utils.data.DataLoader(dataset=mnist_test,batch_size=batch_size,shuffle=False,drop_last=True)
# nn layers
# Batch-norm variant: 784 -> 32 -> 32 -> 10, BatchNorm1d on both hidden layers.
linear1 = torch.nn.Linear(784, 32, bias=True)
linear2 = torch.nn.Linear(32, 32, bias=True)
linear3 = torch.nn.Linear(32, 10, bias=True)
relu = torch.nn.ReLU()  # stateless, safe to share between both models
bn1 = torch.nn.BatchNorm1d(32)
bn2 = torch.nn.BatchNorm1d(32)
# Plain variant: same architecture without batch norm.
nn_linear1 = torch.nn.Linear(784, 32, bias=True)
nn_linear2 = torch.nn.Linear(32, 32, bias=True)
nn_linear3 = torch.nn.Linear(32, 10, bias=True)
# model
bn_model = torch.nn.Sequential(linear1, bn1, relu,linear2, bn2, relu,linear3).to(device)
nn_model = torch.nn.Sequential(nn_linear1, relu,nn_linear2, relu,nn_linear3).to(device)
# define cost/loss & optimizer
criterion = torch.nn.CrossEntropyLoss().to(device) # Softmax is internally computed.
bn_optimizer = torch.optim.Adam(bn_model.parameters(), lr=learning_rate)
nn_optimizer = torch.optim.Adam(nn_model.parameters(), lr=learning_rate)
# Save Losses and Accuracies every epoch
# We are going to plot them later
train_losses = []
train_accs = []
valid_losses = []
valid_accs = []
train_total_batch = len(train_loader)
test_total_batch = len(test_loader)
# Train both models side by side on identical batches, then evaluate each
# epoch on the full train and test sets.
# NOTE: indentation in the scraped source was flattened; structure below is
# reconstructed from the statement sequence (train pass, then no_grad eval).
for epoch in range(training_epochs):
    bn_model.train()  # BatchNorm must use batch statistics during training
    for X, Y in train_loader:
        # reshape input image into [batch_size by 784]
        # label is not one-hot encoded (CrossEntropyLoss expects class indices)
        X = X.view(-1, 28 * 28).to(device)
        Y = Y.to(device)

        # --- batch-norm model step ---
        bn_optimizer.zero_grad()
        bn_prediction = bn_model(X)
        bn_loss = criterion(bn_prediction, Y)
        bn_loss.backward()
        bn_optimizer.step()

        # --- plain model step (same batch, independent optimizer) ---
        nn_optimizer.zero_grad()
        nn_prediction = nn_model(X)
        nn_loss = criterion(nn_prediction, Y)
        nn_loss.backward()
        nn_optimizer.step()

    with torch.no_grad():
        # eval mode: BatchNorm switches to its running statistics.
        # nn_model.eval() is a no-op for Linear/ReLU but kept for symmetry.
        bn_model.eval()
        nn_model.eval()

        # Test the model using train sets
        bn_loss, nn_loss, bn_acc, nn_acc = 0, 0, 0, 0
        for X, Y in train_loader:
            X = X.view(-1, 28 * 28).to(device)
            Y = Y.to(device)
            bn_prediction = bn_model(X)
            bn_correct_prediction = torch.argmax(bn_prediction, 1) == Y
            bn_loss += criterion(bn_prediction, Y)
            bn_acc += bn_correct_prediction.float().mean()
            nn_prediction = nn_model(X)
            nn_correct_prediction = torch.argmax(nn_prediction, 1) == Y
            nn_loss += criterion(nn_prediction, Y)
            nn_acc += nn_correct_prediction.float().mean()
        # Average per-batch metrics (valid because drop_last=True gives
        # equal-sized batches).
        bn_loss, nn_loss, bn_acc, nn_acc = bn_loss / train_total_batch, nn_loss / train_total_batch, bn_acc / train_total_batch, nn_acc / train_total_batch

        # Save train losses/acc
        train_losses.append([bn_loss, nn_loss])
        train_accs.append([bn_acc, nn_acc])
        print('[Epoch %d-TRAIN] Batchnorm Loss(Acc): bn_loss:%.5f(bn_acc:%.2f) vs No Batchnorm Loss(Acc): nn_loss:%.5f(nn_acc:%.2f)' % ((epoch + 1), bn_loss.item(), bn_acc.item(), nn_loss.item(), nn_acc.item()))

        # Test the model using test sets
        bn_loss, nn_loss, bn_acc, nn_acc = 0, 0, 0, 0
        for X, Y in test_loader:
            X = X.view(-1, 28 * 28).to(device)
            Y = Y.to(device)
            bn_prediction = bn_model(X)
            bn_correct_prediction = torch.argmax(bn_prediction, 1) == Y
            bn_loss += criterion(bn_prediction, Y)
            bn_acc += bn_correct_prediction.float().mean()
            nn_prediction = nn_model(X)
            nn_correct_prediction = torch.argmax(nn_prediction, 1) == Y
            nn_loss += criterion(nn_prediction, Y)
            nn_acc += nn_correct_prediction.float().mean()
        bn_loss, nn_loss, bn_acc, nn_acc = bn_loss / test_total_batch, nn_loss / test_total_batch, bn_acc / test_total_batch, nn_acc / test_total_batch

        # Save valid losses/acc
        valid_losses.append([bn_loss, nn_loss])
        valid_accs.append([bn_acc, nn_acc])
        print('[Epoch %d-VALID] Batchnorm Loss(Acc): bn_loss:%.5f(bn_acc:%.2f) vs No Batchnorm Loss(Acc): nn_loss:%.5f(nn_acc:%.2f)' % ((epoch + 1), bn_loss.item(), bn_acc.item(), nn_loss.item(), nn_acc.item()))
        print()
print('Learning finished')