-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathmodels.py
129 lines (107 loc) · 5.55 KB
/
models.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
'''
Contains the models used in the project.
'''
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, UpSampling2D, InputLayer, Activation
from tensorflow.keras.models import Sequential
def cie94(y_true, y_pred, batch_size, height=384, width=384):
    '''
    Return the CIE94-based colour loss between predicted and ground-truth colours.

    The calculation is based on the CIE94 colour distance metric, an improvement
    over pure RMSE on L*a*b* vectors. The two colour channels are assumed to be
    interleaved in the flattened per-image tensor: even flat indices hold the a*
    channel, odd flat indices hold the b* channel — TODO confirm against the
    data pipeline.

    Parameters:
        y_true: ground-truth colour tensor; y_true[i] must flatten to
            2 * height * width values.
        y_pred: predicted colour tensor of the same shape.
        batch_size: number of images in the batch. Passed explicitly because
            reading y_true.shape[0] is problematic on different environments.
        height, width: image resolution. Default 384 x 384, preserving the
            previously hardcoded behaviour; shape introspection via
            y_true.shape[1]/[2] is avoided for the same environment reasons.

    Returns:
        Scalar tensor: the CIE94 distance summed over all pixels and images,
        divided by batch_size * height * width.
    '''
    num_pixels = height * width
    # Even flat indices -> a* channel, odd flat indices -> b* channel.
    alpha = list(range(0, 2 * num_pixels, 2))
    beta = list(range(1, 2 * num_pixels, 2))
    normalizing_constant = batch_size * num_pixels
    # CIE94 weighting constants (graphic-arts values; the lightness term is
    # omitted because only the two chroma channels are predicted).
    K_1 = 0.045
    K_2 = 0.015
    loss = 0.0
    for i in range(batch_size):
        true_flat = tf.keras.backend.flatten(y_true[i])
        pred_flat = tf.keras.backend.flatten(y_pred[i])
        trueAlpha = tf.keras.backend.gather(true_flat, alpha)
        predAlpha = tf.keras.backend.gather(pred_flat, alpha)
        trueBeta = tf.keras.backend.gather(true_flat, beta)
        predBeta = tf.keras.backend.gather(pred_flat, beta)
        # Per-pixel chroma: C = sqrt(a^2 + b^2).
        C1 = tf.keras.backend.sqrt(tf.keras.backend.square(trueAlpha) + tf.keras.backend.square(trueBeta))
        C2 = tf.keras.backend.sqrt(tf.keras.backend.square(predAlpha) + tf.keras.backend.square(predBeta))
        delta_C = C1 - C2
        delta_Csqr = tf.keras.backend.square(delta_C)
        delta_a = trueAlpha - predAlpha
        delta_b = trueBeta - predBeta
        # delta_H^2 follows from delta_a, delta_b and delta_C per the CIE94 definition.
        delta_H_square = tf.keras.backend.square(delta_a) + tf.keras.backend.square(delta_b) - delta_Csqr
        loss += tf.keras.backend.sum(
            tf.keras.backend.sqrt(
                delta_Csqr / tf.keras.backend.square(1.0 + K_1 * C1)
                + delta_H_square / tf.keras.backend.square(1.0 + K_2 * C1)
            ),
            axis=0)
    return loss / normalizing_constant
def getMSEModel():
    '''
    Returns the model object for image colouring via MSE loss.

    An encoder/decoder CNN: three stride-2 convolutions downsample the
    single-channel input by 8x, then three UpSampling2D stages restore the
    resolution, ending in a 2-channel tanh output. Compiled with RMSprop
    and mean-squared-error loss.
    '''
    layers = [
        InputLayer(input_shape=(None, None, 1)),
        # Encoder: progressively downsample while widening the channels.
        Conv2D(8, (3, 3), activation='relu', padding='same', strides=2),
        Conv2D(8, (3, 3), activation='relu', padding='same'),
        Conv2D(16, (3, 3), activation='relu', padding='same'),
        Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
        Conv2D(32, (3, 3), activation='relu', padding='same'),
        Conv2D(32, (3, 3), activation='relu', padding='same', strides=2),
        # Decoder: upsample back to the input resolution.
        UpSampling2D((2, 2)),
        Conv2D(32, (3, 3), activation='relu', padding='same'),
        UpSampling2D((2, 2)),
        Conv2D(16, (3, 3), activation='relu', padding='same'),
        UpSampling2D((2, 2)),
        # Two output channels in [-1, 1] via tanh.
        Conv2D(2, (3, 3), activation='tanh', padding='same'),
    ]
    model = Sequential(layers)
    model.compile(optimizer='rmsprop', loss='mse')
    return model
def getCIE94Model(batch_size):
    '''
    Returns the model object for image colouring via CIE94 Colour Distance loss.

    Parameters:
        batch_size: training batch size, forwarded to the cie94 loss function
            (which cannot reliably read it from the tensor shape).

    Returns:
        A compiled Sequential encoder/decoder model with a 2-channel tanh output.
    '''
    model = Sequential()
    model.add(InputLayer(input_shape=(None, None, 1)))
    # Encoder: three stride-2 stages downsample the input by 8x.
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same', strides=2))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same', strides=2))
    # Decoder: upsample back to the input resolution.
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='same'))
    model.add(UpSampling2D((2, 2)))
    model.add(Conv2D(2, (3, 3), activation='tanh', padding='same'))
    # Bind batch_size into the two-argument loss signature Keras expects.
    loss_fn = lambda y_true, y_pred: cie94(y_true, y_pred, batch_size)
    # Bug fix: the tuned optimizer below was previously constructed but the
    # model was compiled with the string 'rmsprop', silently discarding the
    # custom learning rate. Compile with the optimizer instance instead.
    opti = tf.keras.optimizers.RMSprop(learning_rate=0.0022)
    model.compile(optimizer=opti, loss=loss_fn)
    return model
def getClassificationModel():
    '''
    Returns the model object that uses classification for colouring.

    Same encoder/decoder backbone as the MSE model, but the head is a 1x1
    convolution producing 313 logits per pixel followed by a softmax, trained
    with categorical cross-entropy.
    '''
    layers = [
        InputLayer(input_shape=(None, None, 1)),
        # Encoder: progressively downsample while widening the channels.
        Conv2D(8, (3, 3), activation='relu', padding='same', strides=2),
        Conv2D(8, (3, 3), activation='relu', padding='same'),
        Conv2D(16, (3, 3), activation='relu', padding='same'),
        Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
        Conv2D(32, (3, 3), activation='relu', padding='same'),
        Conv2D(32, (3, 3), activation='relu', padding='same', strides=2),
        # Decoder: upsample back to the input resolution.
        UpSampling2D((2, 2)),
        Conv2D(32, (3, 3), activation='relu', padding='same'),
        UpSampling2D((2, 2)),
        Conv2D(16, (3, 3), activation='relu', padding='same'),
        UpSampling2D((2, 2)),
        # Classification head: 313 colour-bin logits per pixel, then softmax.
        Conv2D(313, (1, 1)),
        Activation('softmax'),
    ]
    model = Sequential(layers)
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
    return model