# calculate_psths.py — download the Steinmetz spike dataset, build peristimulus
# time histograms (PSTHs) per brain area / left-contrast level, and plot them.
# (Removed GitHub page-scrape residue — header text and line-number gutter —
# which was not part of the source and made the file invalid Python.)
# %% Script Parameters
# Public sciebo (owncloud) share link holding the experiment dataset.
url = 'https://uni-bonn.sciebo.de/s/oTfGigwXQ4g0raW'
# Local file the dataset is downloaded to and loaded from (NetCDF format).
filename = 'data.nc'
# %% Download Data
# Exercise (Example): Make a download_data(url, filename) function:
def download_data(url, filename):
    """Download `filename` from the public share at `url` into the working dir.

    Parameters
    ----------
    url : str
        Public owncloud/sciebo share link.
    filename : str
        Name of the remote file; also used as the local destination path.

    Raises
    ------
    FileNotFoundError
        If the file is not present locally after the download attempt —
        the owncloud client can fail without raising, and the original
        code continued silently, leaving later cells to crash confusingly.
    """
    # Local imports keep this notebook-style cell self-contained.
    from pathlib import Path
    import owncloud
    client = owncloud.Client.from_public_link(url)
    client.get_file('/', filename)
    if Path(filename).exists():
        print('Download Succeeded.')
    else:
        # Fail fast with a clear message instead of continuing silently.
        raise FileNotFoundError(f'Download of {filename!r} from {url} failed.')
download_data(url=url, filename=filename)
# %% Load Data
# Exercise: Make a `load_data(filename)` function, returning the `dset` variable.
import xarray as xr


def load_data(filename):
    """Load the NetCDF dataset at `filename` into an `xarray.Dataset`."""
    return xr.load_dataset(filename)


dset = load_data(filename)
# %% Extract Experiment-Level Data
# Exercise: Make an `extract_trials(filename)` function, returning the `trials` variable.


def extract_trials(filename):
    """Return a DataFrame with one row per trial.

    Columns: contrast_left, contrast_right, stim_onset (as stored in the
    dataset's trial-level variables).
    """
    dset = load_data(filename)
    return dset[['contrast_left', 'contrast_right', 'stim_onset']].to_dataframe()


trials = extract_trials(filename)
trials
# %% Extract Spike-Time Data
# Exercise: Make an `extract_spikes(filename)` function, returning the `spikes` variable.
import xarray as xr


def extract_spikes(filename):
    """Return a DataFrame with one row per spike.

    Columns: spike_trial (trial id), spike_cell (cell id), spike_time
    (seconds), taken from the dataset's spike-level variables.
    """
    dset = xr.load_dataset(filename)
    return dset[['spike_trial', 'spike_cell', 'spike_time']].to_dataframe()


spikes = extract_spikes(filename)
spikes
# %% Extract Cell-Level Data
# Exercise: Make an `extract_cells(filename)` function, returning the `cells` variable.
import xarray as xr


def extract_cells(filename):
    """Return a DataFrame with one row per cell.

    Contains the `brain_groups` (brain-area) label for each recorded cell,
    indexed by the dataset's cell dimension.
    """
    dset = xr.load_dataset(filename)
    return dset['brain_groups'].to_dataframe()


cells = extract_cells(filename)
cells
# %% Merge and Compress Extracted Data
# Exercise: Make a `merge_data(trials, cells, spikes)` function, returning the `merged` variable.
import pandas as pd


def merge_data(trials, cells, spikes):
    """Join trial-, cell-, and spike-level tables into one tidy spike table.

    Parameters
    ----------
    trials : pandas.DataFrame
        Indexed by trial id; columns contrast_left, contrast_right, stim_onset.
    cells : pandas.DataFrame
        Indexed by cell id; column brain_groups.
    spikes : pandas.DataFrame
        Columns spike_trial, spike_cell, spike_time (one row per spike).

    Returns
    -------
    pandas.DataFrame
        One row per spike, columns: trial_id, contrast_left, contrast_right,
        stim_onset, cell_id, brain_area, time. `brain_area` is stored as a
        category to compress the repeated area strings.
    """
    # Attach each spike's brain area: cells index <-> spikes.spike_cell.
    merged = pd.merge(left=cells, left_index=True, right=spikes, right_on='spike_cell')
    # Attach trial-level stimulus info: trials index <-> spike_trial.
    merged = pd.merge(left=trials, right=merged, left_index=True, right_on='spike_trial').reset_index(drop=True)
    merged = (
        merged
        .rename(columns=dict(
            brain_groups="brain_area",
            spike_trial="trial_id",
            spike_cell="cell_id",
            spike_time="time",
        ))
        [[
            'trial_id',
            'contrast_left',
            'contrast_right',
            'stim_onset',
            'cell_id',
            'brain_area',
            'time',
        ]]
        # Categorical dtype shrinks memory for the repeated area labels.
        .astype(dict(
            brain_area='category',
        ))
    )
    return merged


merged = merge_data(trials, cells, spikes)
merged.info()
# %% Calculate Time Bins for PSTH
# Exercise: Make a `compute_time_bins(time, bin_interval)` function, returning the `time_bins` variable.
import numpy as np


def compute_time_bins(time, bin_interval):
    """Map each spike time to the start time of its PSTH bin.

    Parameters
    ----------
    time : array-like of float (e.g. pandas.Series)
        Spike times in seconds.
    bin_interval : float
        Bin width in seconds.

    Returns
    -------
    Array-like of the same shape: each time rounded *down* to the nearest
    multiple of `bin_interval` (the bin's left edge).
    """
    # Round to the nearest microsecond first, to reduce floating-point errors.
    time = np.round(time, decimals=6)
    # Round down to the nearest time-bin start.
    return np.floor(time / bin_interval) * bin_interval


time = merged['time']
bin_interval = 0.05
time_bins = compute_time_bins(time, bin_interval)
time_bins
# %% filter out stimuli with contrast on the right.
# No function needed here for this exercise.
# Keep only spikes from trials with no right-side stimulus, so the PSTHs
# isolate the effect of left contrast.
filtered = merged[merged['contrast_right'] == 0]
# BUG FIX: the original message printed the *kept* fraction next to the
# removed count; report the removed count and the removed fraction together.
n_removed = len(merged) - len(filtered)
print(f"Filtered out {n_removed} ({n_removed / len(merged):.2%}) of spikes in dataset.")
filtered
# %% Make PSTHs
# Exercise: Make a `compute_psths(data, time_bins)` function here, returning the `psth` variable.
def compute_psths(data, time_bins):
    """Average spike counts per time bin, left-contrast level, and brain area.

    Parameters
    ----------
    data : pandas.DataFrame
        Spike table with columns trial_id, contrast_left, cell_id, brain_area,
        index-aligned with `time_bins`.
    time_bins : pandas.Series
        Bin start time for each spike row, named 'time'.

    Returns
    -------
    pandas.DataFrame
        Columns: time, contrast_left, brain_area, avg_spike_count (mean
        spikes per cell per trial in each bin).
    """
    # Count spikes in each (bin, trial, contrast, cell, area) combination.
    per_trial = (
        data
        .groupby([time_bins, 'trial_id', 'contrast_left', 'cell_id', 'brain_area'], observed=True)
        .size()
        .rename('spike_count')
        .reset_index()
    )
    # Average those counts over trials and cells.
    return (
        per_trial
        .groupby(['time', 'contrast_left', 'brain_area'], observed=True)
        .spike_count
        .mean()
        .rename('avg_spike_count')
        .reset_index()
    )


psth = compute_psths(filtered, time_bins)
psth
# BUG FIX: firing rate in spikes/second is count per bin DIVIDED by the bin
# width; the original multiplied by `bin_interval`, giving wrong units.
psth['avg_spike_rate'] = psth['avg_spike_count'] / bin_interval
psth
# %% Plot PSTHs
# Make a `plot_psths(psth)` function here, returning the `g` variable.
import seaborn as sns


def plot_psths(psth):
    """Plot one PSTH panel per brain area, with one line per left contrast.

    Parameters
    ----------
    psth : pandas.DataFrame
        Columns: time, contrast_left, brain_area, avg_spike_count.

    Returns
    -------
    seaborn.FacetGrid
        The grid of line plots (call `.savefig(...)` on it to save).
    """
    g = sns.FacetGrid(data=psth, col='brain_area', col_wrap=2)
    g.map_dataframe(sns.lineplot, x='time', y='avg_spike_count', hue='contrast_left')
    g.add_legend()
    return g


g = plot_psths(psth)
g.savefig('PSTHs.png')