-
Notifications
You must be signed in to change notification settings - Fork 3
/
Copy pathfunctions
119 lines (99 loc) · 4.01 KB
/
functions
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
import numpy as np
import nibabel as nib
import glob
from tqdm import tqdm
import matplotlib
matplotlib.use('TkAgg', force=True)
import matplotlib.pyplot as plt
from scipy.ndimage import zoom
from sklearn.model_selection import train_test_split
from keras.layers import Conv2D, BatchNormalization, Activation, \
UpSampling2D, MaxPooling2D, Conv2DTranspose, Dropout, Concatenate, Input
from keras.models import Model
import random
import time
from keras.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger, ReduceLROnPlateau
import pandas as pd
def image_proc(filepath):
    """Load NIfTI (*.nii) volumes and convert them to a stack of 2D slices.

    Each volume is cropped to 64 central axial slices around the brain's
    z-extent, resized to 96 x 96 in-plane, z-score normalized, then min-max
    scaled to [0, 1].

    :param filepath: iterable of .nii file paths (processed in sorted order)
    :return: float32 array of shape (64 * num_files, 96, 96)
    """
    slices = []
    for item in tqdm(sorted(filepath), desc='Processing'):
        # Loading images
        img = nib.load(item).get_fdata()
        # Crop to get the brain region (along z-axis and x & y axes):
        # centre a 64-slice window on the midpoint of the nonzero z-extent.
        ind = np.where(img > 0)
        ind_min, ind_max = min(ind[2]), max(ind[2])
        ind_mid = round((ind_min + ind_max) / 2)
        img = img[8:232, 8:232, ind_mid - 32:ind_mid + 32]  # 224 x 224 x 64
        # Resize in-plane only (z is untouched) -> 96 x 96 x 64
        img = zoom(img, (0.428, 0.428, 1))
        # Normalize using zero mean and unit variance, then scale to [0, 1].
        # NOTE(review): a constant-valued volume would give std() == 0 and
        # produce NaNs here, exactly as in the original code — confirm inputs
        # always contain nonzero contrast before adding a guard.
        img = (img - img.mean()) / img.std()
        img = (img - img.min()) / (img.max() - img.min())
        slices.append(img)
    if not slices:
        # Empty input: return an empty slice stack instead of crashing.
        return np.zeros((0, 96, 96), dtype='float32')
    # Concatenate once (O(n)) instead of growing an array each iteration
    # (O(n^2)); the original also seeded the accumulator with a dummy zeros
    # slice that it stripped afterwards — no longer needed.
    stacked = np.concatenate(slices, axis=2)
    # Move the slice axis first: (96, 96, N) -> (N, 96, 96)
    return np.moveaxis(stacked, 2, 0).astype('float32')
def conv_block(input, num_filters):
    """Two identical Conv2D(3x3) -> BatchNorm -> ReLU stages."""
    features = input
    for _ in range(2):
        features = Conv2D(num_filters, 3,
                          kernel_initializer='he_uniform',
                          padding="same")(features)
        features = BatchNormalization()(features)
        features = Activation("relu")(features)
    return features
def encoder_block(input, num_filters):
    """Conv block then 2x2 max-pooling; returns (skip features, pooled)."""
    skip = conv_block(input, num_filters)
    pooled = MaxPooling2D((2, 2))(skip)
    return skip, pooled
def decoder_block(input, skip_features, num_filters):
    """Upsample 2x, concatenate the encoder skip tensor, run a conv block."""
    upsampled = UpSampling2D((2, 2))(input)
    merged = Concatenate()([upsampled, skip_features])
    return conv_block(merged, num_filters)
def build_2DUNet_model_v1(input_shape, learning_rate=0.001):
    """Build and compile a 2D U-Net regression model.

    Four encoder stages (16, 32, 64, 128 filters), a 256-filter bridge,
    four decoder stages with skip connections, and a 1-channel linear
    output head, compiled with MSE loss and the Adam optimizer.

    :param input_shape: shape of one input image, e.g. (96, 96, 1)
    :param learning_rate: Adam learning rate (default 0.001, matching the
        original hard-coded value)
    :return: compiled keras Model
    """
    # Local import kept so this block stays self-contained, but hoisted to
    # the top of the function instead of being buried before compile().
    from keras.optimizers import Adam

    inputs = Input(input_shape, dtype='float32')
    ini_numb_of_filters = 16

    # Encoder 1, 2, 3, 4 (original comment had a typo: "Eecoder")
    s1, p1 = encoder_block(inputs, ini_numb_of_filters)
    s2, p2 = encoder_block(p1, ini_numb_of_filters * 2)
    s3, p3 = encoder_block(p2, ini_numb_of_filters * 4)
    s4, p4 = encoder_block(p3, ini_numb_of_filters * 8)

    # Bridge
    b1 = conv_block(p4, ini_numb_of_filters * 16)

    # Decoder 1, 2, 3, 4
    d1 = decoder_block(b1, s4, ini_numb_of_filters * 8)
    d2 = decoder_block(d1, s3, ini_numb_of_filters * 4)
    d3 = decoder_block(d2, s2, ini_numb_of_filters * 2)
    d4 = decoder_block(d3, s1, ini_numb_of_filters)

    # Output: single-channel linear head (regression)
    outputs = Conv2D(1, 1, padding="same", activation="linear")(d4)

    model = Model(inputs=[inputs], outputs=[outputs])
    # NOTE(review): 'accuracy' is not meaningful for a regression (MSE)
    # task; kept for backward compatibility with existing logs/CSVLogger
    # column layout — consider dropping it.
    model.compile(optimizer=Adam(learning_rate),
                  loss='mse',
                  metrics=['accuracy', 'mae'])
    return model
def plot_learning_curve(filepath):
    """Plot training vs. validation loss from a CSVLogger history CSV.

    :param filepath: path to the CSV written during training; column 0 is
        taken as the epoch, columns 2 and 5 as training and validation loss
        (presumably the CSVLogger column order — verify against the file).
    """
    history = pd.read_csv(filepath)
    epochs = history.values[:, 0]
    train_loss = history.values[:, 2]
    val_loss = history.values[:, 5]

    plt.figure(figsize=(5, 4))
    plt.plot(epochs, train_loss)
    plt.plot(epochs, val_loss)
    plt.ylabel('mean-squared error', fontsize=16)
    plt.xlabel('epoch', fontsize=16)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.legend(['training loss', 'validation loss'],
               fontsize=14, loc='upper right')
    plt.show()
    return