Skip to content

Commit

Permalink
Fixed issue where classification data could not be added to copy of ra…
Browse files Browse the repository at this point in the history
…w data for Varjo Base recordings.
  • Loading branch information
Thomahawkuru committed Jul 13, 2021
1 parent 4de7dc2 commit ad02e27
Show file tree
Hide file tree
Showing 3 changed files with 45 additions and 54 deletions.
50 changes: 30 additions & 20 deletions functions.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import numpy as np
import pandas as pd
from arff_helper import ArffHelper
from collections import OrderedDict

Expand Down Expand Up @@ -107,22 +108,23 @@ def get_xy_moving_average(data, window_size, inplace=False):
data[column][:] = res
return data

def fill_blink_gaps(Tx, Ty, t, s):
def fill_blink_gaps(data):
"""
Find gaps in the data that represent blinks, for recordings with Varjo Base.
In Unity recording, a blink period is recorded with zeros. This is how blinks are detected.
Varjo base does not record any data during a blink, so instead a jump in time-interval is found.
This funcition detects those gaps in the data and fills them with zero arrays for blink detection.
This function detects those gaps in the data and fills them with zero arrays for blink detection.
:param Tx: numpy array with 'x' data in deg
:param Ty: numpy array with 'y' data in deg
:param t: numpy array with timestamps
:param s: numpy array with gaze tatus
:param data: gazedata read from the .csv
:return: data set Tx, Ty, t, s with added interpolations where blinks occured
:return: patched data set with added interpolations where blinks occurred
"""
t = data['raw_timestamp'] / 10 ** 6
t = np.array(t - t[0])
s = data['status']

# find blinks for Varjo base recording by gaps in time array
dt = np.diff(t)
blink_onsets = np.nonzero(dt > 30)[0]
Expand All @@ -134,25 +136,33 @@ def fill_blink_gaps(Tx, Ty, t, s):
for onset, offset in zip(blink_onsets, blink_offsets):
onset += shift
offset += shift

t = data['raw_timestamp'] / 10 ** 6
t = np.array(t - t[0])
gaptime = t[offset] - t[onset]
npoints = int(gaptime/dt.mean())

# create patches
timepatch = np.linspace(t[onset], t[offset], npoints+2)
timepatch = timepatch[1:-1]
datapatch = np.zeros(npoints)

# append the patches in the data arrays
t = np.insert(t, onset+1, timepatch)
Tx = np.insert(Tx, onset+1, datapatch)
Ty = np.insert(Ty, onset+1, datapatch)
s = np.insert(s, onset+1, datapatch)
# create patch
datapatch = pd.DataFrame(np.zeros([npoints, len(data.columns)]), columns=data.columns)

# append the patches in the data arrays data[:onset+1].append()
past_data = data[:(onset+1)]
future_data = data[offset:]
inserted_data = past_data.append(datapatch, ignore_index=True)
data = inserted_data.append(future_data, ignore_index=True)
# shift indexes with patch length
shift += npoints

return Tx, Ty, t, s
# fix time vector by replacing zeros with NaN and interpolating
raw_times = data['raw_timestamp']
raw_times[raw_times==0] = np.nan
data['raw_timestamp'] = raw_times.interpolate()

if 'relative_to_video_first_frame_timestamp' in data.columns:
raw_video_times = data['relative_to_video_first_frame_timestamp']
raw_video_times[raw_video_times == 0] = np.nan
data['relative_to_video_first_frame_timestamp'] = raw_video_times.interpolate()

return data

def save_events(data, fname, datapath):
"""
Expand All @@ -166,7 +176,7 @@ def save_events(data, fname, datapath):
"""
allnames = ["t_start", 't_end', 'duration', 'x_start', 'y_start', 'x_end', 'y_end', 'amplitude', 'mean_vel', 'max_vel']
names = allnames[0:len(data[1, :])]
names = allnames[0:len(data[0, :])]
delimiter = ','
header = delimiter.join(names)

Expand Down
8 changes: 4 additions & 4 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,8 @@
trialpath = datapath + '{}/{}/'.format(participant,trial)

print(), print('Trial ' + str(trial))
gazedata = readers.Gaze(datapath, participant, trial, filename)
pupildata = readers.Pupil(datapath, participant, trial, filename)
focusdata = readers.Focus(datapath, participant, trial, filename)
csvdata = readers.file_reader(datapath, participant, trial, filename)
gazedata = readers.gaze_arff(csvdata)

# classify gaze events ----------------------------------------------------------------------------------------------
classifiedgazedata = run_detection.DetectGazeEvents(gazedata, debugdetection)
Expand All @@ -56,13 +55,14 @@
if savedata:
outputpath = trialpath + 'detection'
Path(outputpath).mkdir(parents=True, exist_ok=True)

# save detections per event type with their measures
functions.save_events(Fixations, 'fixations.csv', outputpath)
functions.save_events(Saccades, 'saccades.csv', outputpath)
functions.save_events(Pursuits, 'pursuits.csv', outputpath)
functions.save_events(Blinks, 'blinks.csv', outputpath)

# add gaze_event classification column to raw data and save copy
csvdata = readers.file_reader(datapath, participant, trial, filename)
csvdata["gaze_event"] = classifiedgazedata['data']['EYE_MOVEMENT_TYPE']
csvdata.to_csv(outputpath + "/classified_data.csv")

Expand Down
41 changes: 11 additions & 30 deletions readers.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,52 +10,33 @@ def file_reader(path, participant, trial, filename):
path = path + str(participant) + "/" + str(trial) + "/"
file = [i for i in os.listdir(path) if os.path.isfile(os.path.join(path, i)) and \
filename in i]

csvdata = pandas.read_csv(path + file[0], delimiter=',')

return csvdata
# interpolate missing gaps in the data that represent Blinks (for Varjo Base recordings)
patched_data = functions.fill_blink_gaps(csvdata)

def Gaze(path, participant, trial, filename):
return patched_data

gazeData = file_reader(path, participant, trial, filename)
fname = "P" + str(participant) + "_T" + str(trial)
def gaze_arff(csvdata):

# Raw Gaze data
s = np.array(gazeData['status'])
x = np.array(gazeData['gaze_forward_x'])
y = np.array(gazeData['gaze_forward_y'])
s = np.array(csvdata['status'])
x = np.array(csvdata['gaze_forward_x'])
y = np.array(csvdata['gaze_forward_y'])

# get time stamps, checks whether a video time stamp is available
if 'relative_to_video_first_frame_timestamp' in gazeData.columns:
t = np.array(gazeData['relative_to_video_first_frame_timestamp'] / 10 ** 6)
if 'relative_to_video_first_frame_timestamp' in csvdata.columns:
t = np.array(csvdata['relative_to_video_first_frame_timestamp'] / 10 ** 6)
else:
t = gazeData['raw_timestamp'] / 10 ** 6
t = csvdata['raw_timestamp'] / 10 ** 6
t = np.array(t - t[0])

# convert to angles in deg
Tx = (180 / math.pi) * np.arcsin(x)
Ty = (180 / math.pi) * np.arcsin(y)

# interpolate missing gaps in the data that represent Blinks (for Varjo Base recordings)
[Tx, Ty, t, s,] = functions.fill_blink_gaps(Tx, Ty, t, s)

#convert data tor arff object for processing
gaze_points = functions.load_CSV_as_arff_object(Tx, Ty, t, s, fname)
gaze_points = functions.load_CSV_as_arff_object(Tx, Ty, t, s, '')

return gaze_points

def Pupil(path, participant, trial, filename):

gazeData = file_reader(path, participant, trial, filename)

left = np.array(gazeData['left_pupil_size'])
right = np.array(gazeData['right_pupil_size'])

return left, right

def Focus(path, participant, trial, filename):

gazeData = file_reader(path, participant, trial, filename)
focus = np.array(gazeData['focus_distance'])

return focus

0 comments on commit ad02e27

Please sign in to comment.