
Commit 546c80d

Add files via upload

0 parents  commit 546c80d

6 files changed, +39051 -0 lines changed

best_model.ipynb

+243
@@ -0,0 +1,243 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Import libraries & model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "from keras.models import model_from_json\n",
    "# from PIL import Image\n",
    "# import numpy as np\n",
    "# import pandas as pd\n",
    "# from sklearn.preprocessing import OneHotEncoder\n",
    "# from sklearn.metrics import f1_score, recall_score, precision_score\n",
    "import numpy as np\n",
    "import cv2\n",
    "from resizeimage import resizeimage\n",
    "import sys"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loaded model into notebook\n"
     ]
    }
   ],
   "source": [
    "# load json and create model\n",
    "json_file = open('model.json', 'r')\n",
    "loaded_model_json = json_file.read()\n",
    "json_file.close()\n",
    "loaded_model = model_from_json(loaded_model_json)\n",
    "\n",
    "# load weights into new model\n",
    "loaded_model.load_weights(\"model_weights.h5\")\n",
    "print(\"Loaded model into notebook\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "loaded_model.compile(loss='categorical_crossentropy',\n",
    "                     optimizer=\"sgd\",\n",
    "                     metrics=['acc'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "vgg19 (Model)                (None, 1, 1, 512)         20024384  \n",
      "_________________________________________________________________\n",
      "flatten_1 (Flatten)          (None, 512)               0         \n",
      "_________________________________________________________________\n",
      "dense_1 (Dense)              (None, 128)               65664     \n",
      "_________________________________________________________________\n",
      "dense_2 (Dense)              (None, 64)                8256      \n",
      "_________________________________________________________________\n",
      "dense_3 (Dense)              (None, 7)                 455       \n",
      "=================================================================\n",
      "Total params: 20,098,759\n",
      "Trainable params: 20,024,384\n",
      "Non-trainable params: 74,375\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "loaded_model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Emotions dictionary\n",
    "emotions = {\"anger\" : 0,\n",
    "            \"disgust\" : 1,\n",
    "            \"fear\" : 2,\n",
    "            \"happy\" : 3,\n",
    "            \"sad\" : 4,\n",
    "            \"surprise\" : 5,\n",
    "            \"neutral\" : 6}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Haar cascade files for face detection: https://github.com/opencv/opencv/tree/master/data/haarcascades"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Launch Video w/ Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "cap = cv2.VideoCapture(0)\n",
    "# Get user supplied values\n",
    "# imagePath = sys.argv[1]\n",
    "# cascPath = sys.argv[2]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load in the opencv file to detect face\n",
    "face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "while True:\n",
    "    # Capture frame-by-frame\n",
    "    ret, frame = cap.read()\n",
    "    if not ret:\n",
    "        break\n",
    "\n",
    "    # Convert to grayscale\n",
    "    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n",
    "\n",
    "    # Face Detection\n",
    "    faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n",
    "    for (x, y, w, h) in faces:\n",
    "        crop_img = gray[y:y+h, x:x+w]\n",
    "\n",
    "        # Get width and height\n",
    "        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n",
    "        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n",
    "\n",
    "        # Resize for our model (48x48x1)\n",
    "        small = cv2.resize(crop_img, dsize=(48, 48))\n",
    "        # convert size from 48x48 to 1x48x48\n",
    "        image3D = np.expand_dims(small, axis=0)\n",
    "        # convert to 1x48x48x1\n",
    "        image4D = np.expand_dims(image3D, axis=3)\n",
    "        # convert to 1x48x48x3\n",
    "        image4D3 = np.repeat(image4D, 3, axis=3)\n",
179+
"\n",
180+
" # Model each frame\n",
181+
" emotions_prob = loaded_model.predict(image4D3)[0]\n",
182+
" # Convert emotion probabilities into binary, where 1 is the emotion you're feeling\n",
183+
" listt = [1 if metric == emotions_prob.max() else 0 for metric in emotions_prob]\n",
184+
" # Get the index 1 in the binary list, listt \n",
185+
" emotion_index = listt.index(1)\n",
186+
" emotion = list(emotions.keys())[emotion_index]\n",
187+
"\n",
188+
" # Show Emotion on Video\n",
189+
" font = cv2.FONT_HERSHEY_SIMPLEX\n",
190+
" text_placement = (int(width/2 - 500),int(height/2 + 100))\n",
191+
" fontScale = 1\n",
192+
" fontColor = (255,255,255)\n",
193+
" lineType = 4\n",
194+
"\n",
195+
" cv2.putText(frame, \n",
196+
" '{}'.format(emotion), \n",
197+
" text_placement, \n",
198+
" font, \n",
199+
" fontScale,\n",
200+
" fontColor,\n",
201+
" lineType)\n",
202+
" \n",
203+
" # Display the resulting frame\n",
204+
" cv2.imshow('frame',frame)\n",
    "    if cv2.waitKey(20) & 0xFF == ord('q'):\n",
    "        break\n",
    "\n",
    "# When everything is done, release the capture\n",
    "cap.release()\n",
    "cv2.waitKey(0)\n",
    "cv2.destroyAllWindows()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}

emotions.ipynb

+1
Large diffs are not rendered by default.
