CNN.py
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torch.nn.functional as F
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torchvision.transforms import ToTensor
import numpy as np
import PIL
from PIL import Image
# used for creating own dataset w/ values
"""# CREATE OWN DATASET
class CNNDataset(Dataset):
def __init__(self):
pass
def __getitem__(self,index):
pass
def __len__(self):
pass"""
device = torch.device("cpu:0" if torch.cuda.is_available() else "cpu")
# estimate per-channel mean and std from one sample image, used by Normalize below
img = Image.open('./data/brain_tumour_data/test/no/no11.jpg')
img = np.array(img)
tempconvert = transforms.Compose([
    transforms.ToTensor()
])
img = tempconvert(img)
mean, std = img.mean([1, 2]), img.std([1, 2])  # statistics over height and width, per channel
data_transform = transforms.Compose([
    #transforms.Grayscale(),
    transforms.ToTensor(),
    transforms.Resize((225, 225)),
    transforms.Normalize(mean, std)  # standardize each channel to roughly zero mean / unit variance
])
train_loader = DataLoader(ImageFolder('./data/brain_tumour_data/train',transform=data_transform),batch_size=12,shuffle=True)
test_loader = DataLoader(ImageFolder('./data/brain_tumour_data/test',transform=data_transform),batch_size=12,shuffle=True)
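# optional sanity check: pull one batch to confirm ImageFolder plus the transform
# pipeline yield tensors of the expected shape (assumes at least 12 training images)
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # expected: torch.Size([12, 3, 225, 225]) torch.Size([12])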
# input channels, output channels, filter size
class ConvolutionalNeuralNetworks(nn.Module):
    def __init__(self):
        super(ConvolutionalNeuralNetworks, self).__init__()
        # formula to calculate the output dimensions of a conv/pool layer -->
        # out = (in - kernel_size + 2*padding) / stride + 1
        # 225 -> conv (6x6): 220 -> maxpool: 110 -> conv2 (5x5): 106 -> maxpool: 53
        # flattened input to the first linear layer: 32 * 53 * 53 = 89888
        self.conv = nn.Conv2d(3, 12, 6)
        self.conv2 = nn.Conv2d(12, 32, 5)
        self.maxpool = nn.MaxPool2d(2, 2)
        self.linear = nn.Linear(89888, 24)
        self.linear2 = nn.Linear(24, 12)
        self.linear3 = nn.Linear(12, 2)
        self.dropout = nn.Dropout2d(0.25)
    def forward(self, x):
        #print(x.size(), "in forward1")
        x = F.relu(self.conv(x))
        x = self.maxpool(x)
        x = self.dropout(x)
        #print(x.size(), "in forward2")
        x = F.relu(self.conv2(x))
        x = self.maxpool(x)
        x = self.dropout(x)
        #print(x.size(), "in forward3")
        x = x.view(x.size(0), -1)  # flatten to (batch, 89888) for the linear layers
        #print(x.size(), "in forward4")
        x = F.relu(self.linear(x))
        #print(x.size(), "in forward5")
        x = F.relu(self.linear2(x))
        #print(x.size(), "in forward6")
        return self.linear3(x)
model = ConvolutionalNeuralNetworks().to(device)
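# optional shape check: a dummy forward pass should yield one pair of logits per sample
with torch.no_grad():
    print(model(torch.randn(1, 3, 225, 225).to(device)).shape)  # expected: torch.Size([1, 2])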
lr = 0.01
epochs = 10000
criterion = nn.CrossEntropyLoss()
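# note: nn.CrossEntropyLoss applies log-softmax internally, so the raw logits
# returned by linear3 are passed in directly (no softmax in forward)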
optimizer = torch.optim.Adam(model.parameters(), lr = lr)
# training loop
print("starting training")
for epoch in range(epochs):
    for i, (image, label) in enumerate(train_loader):
        image = image.to(device)
        label = label.to(device)
        optimizer.zero_grad()
        predicted = model(image)
        loss = criterion(predicted, label)
        loss.backward()
        optimizer.step()
    if epoch % 2 == 0:
        print(f"epoch : {epoch}, loss : {loss.item():.3f}")
# evaluation on the test set: no gradient updates, just loss and accuracy
model.eval()
correct, total, test_loss = 0, 0, 0.0
with torch.no_grad():
    for i, (image, label) in enumerate(test_loader):
        image = image.to(device)
        label = label.to(device)
        predicted = model(image)
        test_loss += criterion(predicted, label).item()
        correct += (predicted.argmax(dim=1) == label).sum().item()
        total += label.size(0)
print(f"test loss : {test_loss / len(test_loader):.3f}, accuracy : {correct / total:.3f}")