# Experiment hyperparameters, tracked on the run config object
# (presumably wandb.config, initialized earlier in the file — confirm).
config.img_width = 224    # input width expected by VGG16
config.img_height = 224   # input height expected by VGG16
config.epochs = 50        # epochs for training the top model
config.batch_size = 40    # images yielded per generator batch

# Paths for saved weights and the dogs-vs-cats dataset splits.
top_model_weights_path = 'bottleneck.h5'
train_dir = 'dogcat-data/train'
validation_dir = 'dogcat-data/validation'

# Number of samples drawn from each split when extracting features.
nb_train_samples = 1000
nb_validation_samples = 1000

30
def save_bottlebeck_features ():
30
31
if os .path .exists ('bottleneck_features_train.npy' ) and (len (sys .argv ) == 1 or sys .argv [1 ] != "--force" ):
31
32
print ("Using saved features, pass --force to save new features" )
32
33
return
33
34
datagen = ImageDataGenerator (preprocessing_function = preprocess_input )
34
35
train_generator = datagen .flow_from_directory (
35
- train_dir ,
36
- target_size = (config .img_width , config .img_height ),
37
- batch_size = config .batch_size ,
38
- class_mode = "binary" )
36
+ train_dir ,
37
+ target_size = (config .img_width , config .img_height ),
38
+ batch_size = config .batch_size ,
39
+ class_mode = "binary" )
39
40
40
41
val_generator = datagen .flow_from_directory (
41
- validation_dir ,
42
- target_size = (config .img_width , config .img_height ),
43
- batch_size = config .batch_size ,
44
- class_mode = "binary" )
45
-
42
+ validation_dir ,
43
+ target_size = (config .img_width , config .img_height ),
44
+ batch_size = config .batch_size ,
45
+ class_mode = "binary" )
46
+
46
47
# build the VGG16 network
47
48
model = VGG16 (include_top = False , weights = 'imagenet' )
48
-
49
+
49
50
print ("Predicting bottleneck training features" )
50
51
training_labels = []
51
52
training_features = []
52
- for batch in range (5 ): # nb_train_samples // config.batch_size):
53
+ for batch in range (5 ): # nb_train_samples // config.batch_size):
53
54
data , labels = next (train_generator )
54
55
training_labels .append (labels )
55
56
training_features .append (model .predict (data ))
56
57
training_labels = np .concatenate (training_labels )
57
58
training_features = np .concatenate (training_features )
58
59
np .savez (open ('bottleneck_features_train.npy' , 'wb' ),
59
- features = training_features , labels = training_labels )
60
-
60
+ features = training_features , labels = training_labels )
61
+
61
62
print ("Predicting bottleneck validation features" )
62
63
validation_labels = []
63
64
validation_features = []
@@ -71,7 +72,7 @@ def save_bottlebeck_features():
71
72
validation_features = np .concatenate (validation_features )
72
73
validation_data = np .concatenate (validation_data )
73
74
np .savez (open ('bottleneck_features_validation.npy' , 'wb' ),
74
- features = training_features , labels = training_labels , data = validation_data )
75
+ features = validation_features , labels = validation_labels , data = validation_data )
75
76
76
77
77
78
def train_top_model ():
@@ -88,18 +89,20 @@ def train_top_model():
88
89
89
90
model .compile (optimizer = 'rmsprop' ,
90
91
loss = 'binary_crossentropy' , metrics = ['accuracy' ])
91
-
92
+
92
93
class Images (Callback ):
93
94
def on_epoch_end (self , epoch , logs ):
94
95
base_model = VGG16 (include_top = False , weights = 'imagenet' )
95
96
indices = np .random .randint (val_data .shape [0 ], size = 36 )
96
97
test_data = val_data [indices ]
97
- features = base_model .predict (np .array ([preprocess_input (data ) for data in test_data ]))
98
+ features = base_model .predict (
99
+ np .array ([preprocess_input (data ) for data in test_data ]))
98
100
pred_data = model .predict (features )
99
101
wandb .log ({
100
- "examples" : [
101
- wandb .Image (test_data [i ], caption = "cat" if pred_data [i ] < 0.5 else "dog" )
102
- for i , data in enumerate (test_data )]
102
+ "examples" : [
103
+ wandb .Image (
104
+ test_data [i ], caption = "cat" if pred_data [i ] < 0.5 else "dog" )
105
+ for i , data in enumerate (test_data )]
103
106
}, commit = False )
104
107
105
108
model .fit (X_train , y_train ,
0 commit comments