brain2.py
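# Train a small convolutional network (TFLearn on TensorFlow 1.x) that classifies
# greyscale eye images as "open" or "closed", using filenames as the label source.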
import os
from random import shuffle

import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tqdm import tqdm

import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
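# Configuration: data directories, learning rate, checkpoint name, and input image size.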
TEST_DIR = 'test_data'
TRAIN_DIR = 'training_data'
LEARNING_RATE = 1e-3
MODEL_NAME = "eye-{}-{}.model".format(LEARNING_RATE, "6conv-fire")
IMAGE_SIZE = 50
def label_image(img):
    # One-hot label derived from the filename: [1, 0] = open, [0, 1] = closed.
    if "open" in img:
        return [1, 0]
    elif "closed" in img:
        return [0, 1]
    return None  # filename gives no usable label
def train_data_loader():
    # Build the training set: greyscale images resized to IMAGE_SIZE x IMAGE_SIZE,
    # paired with their one-hot labels, shuffled, and cached to disk.
    training_data = []
    for img in tqdm(os.listdir(path=TRAIN_DIR)):
        img_label = label_image(img)
        if img_label is None:
            continue  # skip files whose name carries no label
        path_to_img = os.path.join(TRAIN_DIR, img)
        img = cv2.resize(cv2.imread(
            path_to_img, cv2.IMREAD_GRAYSCALE), (IMAGE_SIZE, IMAGE_SIZE))
        training_data.append([np.array(img), np.array(img_label)])
    shuffle(training_data)
    # dtype=object keeps newer NumPy from rejecting the ragged image/label pairs
    np.save("training_data_new.npy", np.array(training_data, dtype=object))
    return training_data
def testing_data():
    # Build the (unlabelled) test set: the filename stem is kept as an image id.
    test_data = []
    for img in tqdm(os.listdir(TEST_DIR)):
        img_id = img.split(".")[0]
        path_to_img = os.path.join(TEST_DIR, img)
        img = cv2.resize(cv2.imread(
            path_to_img, cv2.IMREAD_GRAYSCALE), (IMAGE_SIZE, IMAGE_SIZE))
        test_data.append([np.array(img), np.array(img_id)])
    shuffle(test_data)
    np.save("test_dataone.npy", np.array(test_data, dtype=object))
    return test_data
train_data_loader()
testing_data()
# allow_pickle=True is required to reload the object arrays saved above
train_data_g = np.load('training_data_new.npy', allow_pickle=True)
tf.reset_default_graph()
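# Network: stacked conv/max-pool blocks, dropout, and a 2-way softmax output.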
convnet = input_data(shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name='input')
# Conv Layer 1
convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
# Conv Layer 2
convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
# Conv Layer 3
convnet = conv_2d(convnet, 128, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
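# Conv Layer 4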
convnet = conv_2d(convnet, 64, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
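# Conv Layer 5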
convnet = conv_2d(convnet, 32, 5, activation='relu')
convnet = max_pool_2d(convnet, 5)
# Dropout to reduce overfitting
convnet = dropout(convnet, 0.8)
# Fully Connected Layer with SoftMax as Activation Function
convnet = fully_connected(convnet, 2, activation='softmax')
# Regression for ConvNet with ADAM optimizer
convnet = regression(convnet, optimizer='adam', learning_rate=LEARNING_RATE,
                     loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(convnet, tensorboard_dir='log')
if os.path.exists("{}.meta".format(MODEL_NAME)):
    model.load(MODEL_NAME)
    print("Model Loaded")
train = train_data_g[:-1000]
test = train_data_g[-1000:]
# This is our Training data
X = np.array([i[0] for i in train]).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)
Y = [i[1] for i in train]
# This is our validation data
test_x = np.array([i[0] for i in test]).reshape(-1, IMAGE_SIZE, IMAGE_SIZE, 1)
test_y = [i[1] for i in test]
model.fit(X, Y, n_epoch=10, validation_set=(test_x, test_y),
          snapshot_step=1000, show_metric=True, run_id=MODEL_NAME)
model.save(MODEL_NAME)
test_data = np.load("test_dataone.npy", allow_pickle=True)
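# A minimal sketch (not part of the original script) of how the loaded test data
# could be used: run the trained model on the first few test images and plot each
# one with its predicted label. It assumes matplotlib (imported above) and the
# [image, image_id] layout produced by testing_data(); the grid size is arbitrary.
fig = plt.figure()
for idx, (img_data, img_id) in enumerate(test_data[:12]):
    ax = fig.add_subplot(3, 4, idx + 1)
    model_input = img_data.reshape(IMAGE_SIZE, IMAGE_SIZE, 1)
    prediction = model.predict([model_input])[0]
    # Index 0 is the "open" probability, index 1 is "closed" (see label_image).
    label = "open" if np.argmax(prediction) == 0 else "closed"
    ax.imshow(img_data, cmap="gray")
    ax.set_title(label)
    ax.axis("off")
plt.show()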