#!/usr/bin/env python3
# This is a demo of Xnor.ai's AI2GO platform, which provides
# fast, low-power AI on embedded processors.
#
# This is a Raspberry Pi application that reads frames from the
# Pi camera, uses AI2GO's facial expression classifier to assess the
# expression of any face in the frame, and shows a suitable image onscreen.
#
# For more details on setting this demo up and running it, see:
# http://www.digitalenvironment.org
#
# Based on Matt Welsh's AI2GO example at
# https://medium.com/@mdwdotla/true-ai-on-a-raspberry-pi-with-no-extra-hardware-dcdbff12d068
#
# See README:
# To uninstall existing xnor model:
# python3 -m pip uninstall xnornet
# To install a new xnor model:
# python3 -m pip install xnornet*.whl
# To run this code:
# python3 facial_expression.py
# To autostart this code on the Pi:
# edit /home/pi/.config/autostart/facial_expression.desktop (e.g. with nano)
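# A minimal sketch of that .desktop entry (the name and paths here are
# assumptions; adjust them to match where this script actually lives):
# [Desktop Entry]
# Type=Application
# Name=Facial Expression Demo
# Exec=/usr/bin/python3 /home/pi/facial_expression.py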
#
import argparse
import datetime
import random
import sys
import time
import os
from itertools import cycle
#
import xnornet
from tkinter import Tk, Canvas, Button
from PIL import Image, ImageTk
import picamera
import common_util.ansi as ansi
if sys.version_info[0] < 3:
sys.exit("This app requires Python 3.")
# Draw canvas for 720x720 pixel Neopixel screen
canvas_width = 720
canvas_height = 720
master = Tk()
canvas = Canvas(master, bg = "white",
width = canvas_width, height = canvas_height)
canvas.pack()
# Set to True to generate fake data for testing.
FAKE_DATA = False
# Face image png file location
DIR_NAME = '/home/pi/Public/faces'
# These constants are initialized below.
# Input resolution
INPUT_RES = 0
# Constant frame size
SINGLE_FRAME_SIZE_RGB = 0
SINGLE_FRAME_SIZE_YUV = 0
YUV420P_Y_PLANE_SIZE = 0
YUV420P_U_PLANE_SIZE = 0
YUV420P_V_PLANE_SIZE = 0
# Emotion labels used by the facial expression classifier
EMOTIONS = ['happy', 'sad', 'anger', 'fear', 'disgust', 'surprise', 'neutral']
BAD_MODEL_ERROR = (ansi.RED + "ERROR: " + ansi.NORMAL + "This app requires the "
"facial-expression-classifier model to be installed.")
# Place facial expression images on canvas - add a system halt button for
# headless mode
def _imageAdd(face='unknown'):
    button = Button(master, width=3, text='Halt', anchor='w',
                    fg='lightgray', command=shutdown, bg="white")
    canvas.delete("all")
    try:
        image_path = os.path.join(DIR_NAME, "face_" + face + ".png")
        if not os.path.isfile(image_path):
            image_path = os.path.join(DIR_NAME, "face_makeaface.png")
        load = Image.open(image_path)
        faceImage = ImageTk.PhotoImage(load)
    except IOError:
        return  # No image could be loaded; leave the canvas blank
    canvas.create_image(0, 0, anchor='nw', image=faceImage)
    # Keep a reference so Tkinter doesn't garbage-collect the image
    canvas.image = faceImage
    canvas.create_window(10, 580, anchor='nw', window=button)
    master.update()
def shutdown():
    """Halt the Pi; wired to the on-screen Halt button."""
    os.system("sudo shutdown -h now")
def _initialize_camera_vars(camera_res):
"""Initialize camera constants."""
global INPUT_RES
global SINGLE_FRAME_SIZE_RGB
global SINGLE_FRAME_SIZE_YUV
global YUV420P_Y_PLANE_SIZE
global YUV420P_U_PLANE_SIZE
global YUV420P_V_PLANE_SIZE
#
INPUT_RES = camera_res
SINGLE_FRAME_SIZE_RGB = INPUT_RES[0] * INPUT_RES[1] * 3
SINGLE_FRAME_SIZE_YUV = INPUT_RES[0] * INPUT_RES[1] * 3 // 2
YUV420P_Y_PLANE_SIZE = INPUT_RES[0] * INPUT_RES[1]
YUV420P_U_PLANE_SIZE = YUV420P_Y_PLANE_SIZE // 4
YUV420P_V_PLANE_SIZE = YUV420P_U_PLANE_SIZE
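# For example, at the default 512x512 input resolution the sizes above
# work out to:
#   SINGLE_FRAME_SIZE_RGB = 512 * 512 * 3      = 786432 bytes
#   SINGLE_FRAME_SIZE_YUV = 512 * 512 * 3 // 2 = 393216 bytes
#   (Y plane 262144 bytes; U and V planes 65536 bytes each)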
def _make_argument_parser():
"""Create a command-line argument parser object."""
parser = argparse.ArgumentParser(description=__doc__, allow_abbrev=False)
parser.add_argument(
"--camera_frame_rate",
action='store',
type=int,
default=8,
help="Adjust the framerate of the camera.")
parser.add_argument(
"--camera_brightness",
action='store',
type=int,
default=60,
help="Adjust the brightness of the camera.")
parser.add_argument(
"--camera_recording_format",
action='store',
type=str,
default='yuv',
choices={'yuv', 'rgb'},
        help="Set the camera recording format; 'yuv' implicitly "
        "means YUV420P.")
parser.add_argument(
"--camera_input_resolution",
action='store',
nargs=2,
type=int,
default=(512, 512),
        help="Input resolution of the camera (width height).")
return parser
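# Example invocation (all flags are optional; the values shown are the
# defaults defined above):
#   python3 facial_expression.py --camera_frame_rate 8 --camera_brightness 60 \
#       --camera_recording_format yuv --camera_input_resolution 512 512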
def _get_camera_frame(args, camera, stream):
"""Get a frame from the CircularIO buffer."""
cam_output = stream.getvalue()
if args.camera_recording_format == 'yuv':
        # The camera has not yet written a full frame to the CircularIO
        # buffer, so no frame has been captured
if len(cam_output) != SINGLE_FRAME_SIZE_YUV:
return None
# Split YUV plane
y_plane = cam_output[0:YUV420P_Y_PLANE_SIZE]
u_plane = cam_output[YUV420P_Y_PLANE_SIZE:YUV420P_Y_PLANE_SIZE +
YUV420P_U_PLANE_SIZE]
v_plane = cam_output[YUV420P_Y_PLANE_SIZE +
YUV420P_U_PLANE_SIZE:SINGLE_FRAME_SIZE_YUV]
# Passing corresponding YUV plane
model_input = xnornet.Input.yuv420p_image(INPUT_RES, y_plane, u_plane,
v_plane)
elif args.camera_recording_format == 'rgb':
        # The camera has not yet written a full frame to the CircularIO
        # buffer, so no frame has been captured
if len(cam_output) != SINGLE_FRAME_SIZE_RGB:
return None
model_input = xnornet.Input.rgb_image(INPUT_RES, cam_output)
else:
raise ValueError("Unsupported recording format")
return model_input
def _inference_loop(args, camera, stream, model):
"""Main inference loop."""
while True:
model_input = _get_camera_frame(args, camera, stream)
if model_input is not None:
results = model.evaluate(model_input)
print(results)
if FAKE_DATA:
expression = random.choice(EMOTIONS) # get random emotion
_imageAdd(expression)
else:
                face = [element.label for element in results]
if face:
expression = str(face[0])
# Example return: [ClassLabel(class_id=1825641713, label='neutral')]
_imageAdd(expression)
else:
_imageAdd()
time.sleep(2.0)
def main(args=None):
parser = _make_argument_parser()
args = parser.parse_args(args)
try:
camera = picamera.PiCamera()
camera.resolution = tuple(args.camera_input_resolution)
_initialize_camera_vars(camera.resolution)
# Initialize the buffer for picamera to hold the frame
# https://picamera.readthedocs.io/en/release-1.13/api_streams.html?highlight=PiCameraCircularIO
if args.camera_recording_format == 'yuv':
stream = picamera.PiCameraCircularIO(
camera, size=SINGLE_FRAME_SIZE_YUV)
elif args.camera_recording_format == 'rgb':
stream = picamera.PiCameraCircularIO(
camera, size=SINGLE_FRAME_SIZE_RGB)
else:
raise ValueError("Unsupported recording format")
camera.framerate = args.camera_frame_rate
camera.brightness = args.camera_brightness
# Record to the internal CircularIO
# PiCamera's YUV is YUV420P
# https://picamera.readthedocs.io/en/release-1.13/recipes2.html#unencoded-image-capture-yuv-format
camera.start_recording(stream, format=args.camera_recording_format)
# Load Xnor model from disk.
model = xnornet.Model.load_built_in()
        # Verify that the loaded model is the facial expression classifier
for class_label in model.class_labels:
if class_label not in EMOTIONS:
sys.exit(BAD_MODEL_ERROR)
_inference_loop(args, camera, stream, model)
    finally:
        # Release the camera cleanly on exit (e.g. after Ctrl+C).
camera.stop_recording()
camera.close()
if __name__ == "__main__":
main()