-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy paththreads.py
313 lines (264 loc) · 14.9 KB
/
threads.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
# -*- coding: utf-8 -*-
"""
eye_tracker/run.py
Demonstrates the ioHub Common EyeTracking Interface by displaying a gaze cursor
at the currently reported gaze position on an image background.
All currently supported Eye Tracker Implementations are supported,
with the Eye Tracker Technology chosen at the start of the demo via a
drop down list. Exact same demo script is used regardless of the
Eye Tracker hardware used.
Initial Version: May 6th, 2013, Sol Simpson
"""
from psychopy import visual
from psychopy.iohub import (EventConstants, EyeTrackerConstants,
getCurrentDateTimeString,
ioHubExperimentRuntime,module_directory,ExperimentVariableProvider)
import os
class ExperimentRuntime(ioHubExperimentRuntime):
    """
    Create an experiment using psychopy and the ioHub framework by extending
    the ioHubExperimentRuntime class. At minimum, all that is needed in the
    __init__ of the new class (here called ExperimentRuntime) is a call to
    the ioHubExperimentRuntime __init__ itself.
    """
    def run(self,*args):
        """
        Run the experiment logic; equivalent to the main script.py file of a
        standard psychopy experiment.

        Shows an instruction screen, then for every trial in the condition
        spreadsheet displays the trial image with a gaze cursor drawn at the
        eye tracker's latest reported gaze position until the space key is
        pressed, saving a condition-variable row per trial.

        args[0] is the display name of the eye tracker chosen in the launch
        dialog (see main() at the bottom of this file); it is only logged.
        """
        # Load trial conditions from the spreadsheet and register the
        # resulting variable table with the ioHub DataStore.
        self.trial_conditions=ExperimentVariableProvider('trial_conditions.xls',
                                                         'BLOCK',None,False,True)
        self.hub.initializeConditionVariableTable(self.trial_conditions)
        selected_eyetracker_name=args[0]
        # Short-cuts to the ioHub devices used by this 'experiment'.
        tracker=self.hub.devices.tracker
        display=self.hub.devices.display
        kb=self.hub.devices.kb
        mouse=self.hub.devices.mouse
        # Create a psychopy window, full screen resolution, full screen mode.
        res=display.getPixelResolution()
        window=visual.Window(res,monitor=display.getPsychopyMonitorName(),
                             units=display.getCoordinateType(),
                             fullscr=True,
                             allowGUI=False,
                             screen=display.getIndex()
                             )
        # Hide the OS mouse cursor while the experiment window is up.
        mouse.setSystemCursorVisibility(False)
        # Run the eye tracker's default setup procedure with the experiment
        # window minimized so the tracker's own setup GUI is visible.
        # If validation results are returned they come back as a dict, so
        # print them; otherwise just check that EYETRACKER_OK was returned.
        window.winHandle.minimize()
        result=tracker.runSetupProcedure()
        if isinstance(result,dict):
            # BUG FIX: the original used Python 2 print statements here,
            # which are a syntax error under Python 3; single-string
            # print(...) calls behave the same under both interpreters.
            print("Validation Accuracy Results: %s"%(result,))
        elif result != EyeTrackerConstants.EYETRACKER_OK:
            print("An error occurred during eye tracker user setup: %s"
                  %(EyeTrackerConstants.getName(result),))
        # Restore the psychopy experiment window.
        window.winHandle.maximize()
        window.winHandle.activate()
        # Pre-build an ImageStim per trial image, a gaze blob to show gaze
        # position, and a reusable instructions text stim.
        display_coord_type=display.getCoordinateType()
        image_cache=dict()
        image_names=['canal.jpg','fall.jpg','party.jpg','swimming.jpg','lake.jpg']
        for iname in image_names:
            image_cache[iname]=visual.ImageStim(window, image=os.path.join('./images/',iname),
                                                name=iname,units=display_coord_type)
        gaze_dot=visual.GratingStim(window,tex=None, mask="gauss",
                                    pos=(0,0),size=(66,66),color='green',
                                    units=display_coord_type)
        instructions_text_stim = visual.TextStim(window, text='', pos=[0,0], height=24,
                                                 color=[-1,-1,-1], colorSpace='rgb',
                                                 alignHoriz='center', alignVert='center',
                                                 wrapWidth=window.size[0]*.9)
        # Show the start screen and timestamp it in the ioHub DataStore.
        instuction_text="Press Any Key to Start Experiment."
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time=window.flip()
        self.hub.sendMessageEvent(text="EXPERIMENT_START",sec_time=flip_time)
        # Wait until any keyboard event occurs after the instructions are shown.
        self.hub.clearEvents('all')
        while not kb.getEvents():
            self.hub.wait(0.2)
        # Log session/configuration details to the ioHub DataStore as
        # experiment messages, including the eye tracker used this session.
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO START")
        self.hub.sendMessageEvent(text="ioHub Experiment started {0}".format(getCurrentDateTimeString()))
        self.hub.sendMessageEvent(text="Experiment ID: {0}, Session ID: {1}".format(self.hub.experimentID,self.hub.experimentSessionID))
        self.hub.sendMessageEvent(text="Stimulus Screen ID: {0}, Size (pixels): {1}, CoordType: {2}".format(display.getIndex(),display.getPixelResolution(),display.getCoordinateType()))
        self.hub.sendMessageEvent(text="Calculated Pixels Per Degree: {0} x, {1} y".format(*display.getPixelsPerDegree()))
        self.hub.sendMessageEvent(text="Eye Tracker being Used: {0}".format(selected_eyetracker_name))
        self.hub.sendMessageEvent(text="IO_HUB EXPERIMENT_INFO END")
        practice_blocks=self.trial_conditions.getPracticeBlocks()
        exp_blocks=self.trial_conditions.getExperimentBlocks()
        block_types=[practice_blocks,exp_blocks]
        for blocks in block_types:
            # For each block in the group of blocks...
            for trial_set in blocks.getNextConditionSet():
                self.hub.clearEvents('all')
                t=0
                for trial in trial_set.getNextConditionSet():
                    # Per-trial instruction screen.
                    instuction_text="Press Space Key To Start Trial %d"%t
                    instructions_text_stim.setText(instuction_text)
                    instructions_text_stim.draw()
                    flip_time=window.flip()
                    # NOTE(review): "EXPERIMENT_START" is reused here for the
                    # per-trial instruction flip; kept byte-identical for
                    # DataStore compatibility, though a distinct message
                    # (e.g. "TRIAL_INSTRUCTIONS") would be clearer.
                    self.hub.sendMessageEvent(text="EXPERIMENT_START",sec_time=flip_time)
                    start_trial=False
                    # Wait until a space key 'press' event occurs.
                    self.hub.clearEvents('all')
                    while not start_trial:
                        for event in kb.getEvents(event_type_id=EventConstants.KEYBOARD_PRESS):
                            if event.key == ' ':
                                start_trial=True
                                break
                        self.hub.wait(0.2)
                    # Trial start requested: clear the screen, record trial
                    # metadata, start recording eye data, and drop any events
                    # received so far.
                    flip_time=window.flip()
                    trial['session_id']=self.hub.getSessionID()
                    trial['trial_id']=t+1
                    trial['TRIAL_START']=flip_time
                    self.hub.sendMessageEvent(text="TRIAL_START",sec_time=flip_time)
                    self.hub.clearEvents('all')
                    tracker.setRecordingState(True)
                    # The image displayed for this trial.
                    image_name=trial['IMAGE_NAME']
                    imageStim=image_cache[image_name]
                    # Loop until we get a space-key press event.
                    run_trial=True
                    while run_trial is True:
                        # Latest gaze position in display coord space; a
                        # tuple/list means a valid sample, anything else
                        # means no gaze is currently available.
                        gpos=tracker.getLastGazePosition()
                        if isinstance(gpos,(tuple,list)):
                            # Valid gaze: draw the background image and then
                            # the gaze cursor on top of it.
                            gaze_dot.setPos(gpos)
                            imageStim.draw()
                            gaze_dot.draw()
                        else:
                            # Otherwise just draw the background image.
                            imageStim.draw()
                        # Flip video buffers, updating the display with the
                        # stim we just drew.
                        flip_time=window.flip()
                        # Message the ioHub Process / DataStore with the image
                        # draw time and current gaze position.
                        # BUG FIX: the original logged `iname` — the leftover
                        # variable from the image-cache loop above, i.e.
                        # always 'lake.jpg' — instead of the image actually
                        # being displayed this trial.
                        if isinstance(gpos,(tuple,list)):
                            self.hub.sendMessageEvent("IMAGE_UPDATE %s %.3f %.3f"%(image_name,gpos[0],gpos[1]),sec_time=flip_time)
                        else:
                            self.hub.sendMessageEvent("IMAGE_UPDATE %s [NO GAZE]"%(image_name),sec_time=flip_time)
                        # Check new keyboard press events for a space key;
                        # if found, end the trial.
                        for event in kb.getEvents(event_type_id=EventConstants.KEYBOARD_PRESS):
                            if event.key == ' ':
                                run_trial=False
                                break
                    # Trial ended: blank the screen, record the end time, and
                    # stop recording eye data (no use for eye data between
                    # trials, so why save it).
                    flip_time=window.flip()
                    trial['TRIAL_END']=flip_time
                    self.hub.sendMessageEvent(text="TRIAL_END %d"%t,sec_time=flip_time)
                    tracker.setRecordingState(False)
                    # Save this trial's condition variable row to the
                    # ioDataStore.
                    self.hub.addRowToConditionVariableTable(trial.tolist())
                    self.hub.clearEvents('all')
                    t+=1
        # All trials have run; disconnect the eye tracking device.
        tracker.setConnectionState(False)
        # Show the exit screen and timestamp it.
        instuction_text="Press Any Key to Exit Demo"
        instructions_text_stim.setText(instuction_text)
        instructions_text_stim.draw()
        flip_time=window.flip()
        self.hub.sendMessageEvent(text="SHOW_DONE_TEXT",sec_time=flip_time)
        # Wait until any key is pressed, then log experiment completion.
        self.hub.clearEvents('all')
        while not kb.getEvents(event_type_id=EventConstants.KEYBOARD_PRESS):
            self.hub.wait(0.2)
        flip_time=window.flip()
        self.hub.sendMessageEvent(text='EXPERIMENT_COMPLETE',sec_time=flip_time)
        ### End of experiment logic
####### Main Script Launching Code Below #######
if __name__ == "__main__":
    def main(configurationDirectory):
        """
        Creates an instance of the ExperimentRuntime class, gets the eye
        tracker the user wants to use for the demo, and launches the
        experiment logic.
        """
        import os
        from psychopy import gui
        # A base iohub_config.yaml.part holds every ioHub setting except the
        # eye tracker's; per-tracker settings live in the yaml files under
        # eyetracker_configs. After the user picks a tracker, the base file
        # and that tracker's yaml are merged and saved as iohub_config.yaml,
        # which the experiment runtime then picks up as normal.
        eye_tracker_config_files={
            'LC Technologies EyeGaze':'eyetracker_configs/eyegaze_config.yaml',
            'SMI iViewX':'eyetracker_configs/iviewx_config.yaml',
            'SR Research EyeLink':'eyetracker_configs/eyelink_config.yaml',
            'Tobii Technologies Eye Trackers':'eyetracker_configs/tobii_config.yaml'
            }
        info = {'Eye Tracker Type': ['Select', 'LC Technologies EyeGaze',
                'SMI iViewX', 'SR Research EyeLink', 'Tobii Technologies Eye Trackers']}
        # Prompt until the user picks a real tracker; a cancelled dialog
        # aborts the launch.
        dialog_state=dict(info)
        selector = gui.DlgFromDict(dictionary=dialog_state, title='Select Eye Tracker')
        if not selector.OK:
            return -1
        while dialog_state.values()[0] == u'Select' and selector.OK:
            dialog_state=dict(info)
            selector = gui.DlgFromDict(dictionary=dialog_state, title='SELECT Eye Tracker To Continue...')
            if not selector.OK:
                return -1
        selected_tracker=dialog_state.values()[0]
        # Merge the base config with the chosen tracker's config.
        base_config_file=os.path.normcase(
            os.path.join(configurationDirectory,'iohub_config.yaml.part'))
        eyetrack_config_file=os.path.normcase(
            os.path.join(configurationDirectory,
                         eye_tracker_config_files[selected_tracker]))
        combined_config_file_name=os.path.normcase(
            os.path.join(configurationDirectory,'iohub_config.yaml'))
        ExperimentRuntime.mergeConfigurationFiles(
            base_config_file,eyetrack_config_file,combined_config_file_name)
        # Launch the experiment, passing the tracker name through to run().
        runtime=ExperimentRuntime(configurationDirectory, "experiment_config.yaml")
        runtime.start((selected_tracker,))
    # Get the current directory via a method that does not rely on __FILE__
    # or the accuracy of its value.
    configurationDirectory=module_directory(main)
    # Run the main function, which starts the experiment runtime.
    main(configurationDirectory)