Commit 9c7b099

Archiving library versions 0.6 and 0.7.

1 parent 84b14ab commit 9c7b099

730 files changed: +277150 −0 lines changed

0.6/.buildinfo (+4)

@@ -0,0 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 75c590fa0e9b50d7ddb42c80fc31816e
tags: 645f666f9bcd5a90fca523b33c5a78b7

0.6/.nojekyll

Whitespace-only changes.

0.6/CNAME (+2)

@@ -0,0 +1,2 @@
braindecode.org
www.braindecode.org
@@ -0,0 +1,64 @@
"""
Custom Dataset Example
======================

This example shows how to convert data X and y as numpy arrays to a
braindecode-compatible data format.
"""

# Authors: Lukas Gemein <[email protected]>
#
# License: BSD (3-clause)

import mne

from braindecode.datasets import create_from_X_y

###############################################################################
# To set up the example, we first fetch some data using mne:

# 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
subject_id = 22
event_codes = [5, 6, 9, 10, 13, 14]
# event_codes = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]

# This will download the files if you don't have them yet,
# and then return the paths to the files.
physionet_paths = mne.datasets.eegbci.load_data(
    subject_id, event_codes, update_path=False)

# Load each of the files
parts = [mne.io.read_raw_edf(path, preload=True, stim_channel='auto')
         for path in physionet_paths]

###############################################################################
# We take the required data, targets, and additional information (sampling
# frequency and channel names) from the loaded data. Note that this data and
# information can originate from any source.
X = [raw.get_data() for raw in parts]
y = event_codes
sfreq = parts[0].info["sfreq"]
ch_names = parts[0].info["ch_names"]

###############################################################################
# Convert to a data format compatible with skorch and braindecode:
windows_dataset = create_from_X_y(
    X, y, drop_last_window=False, sfreq=sfreq, ch_names=ch_names,
    window_stride_samples=500,
    window_size_samples=500,
)

windows_dataset.description  # look at the dataset description

###############################################################################
# You can manipulate the dataset
print(len(windows_dataset))  # get the number of samples

###############################################################################
# You can now index the data
i = 0
x_i, y_i, window_ind = windows_dataset[i]
n_channels, n_times = x_i.shape  # the EEG data
_, start_ind, stop_ind = window_ind
print(f"n_channels={n_channels} -- n_times={n_times} -- y_i={y_i}")
print(f"start_ind={start_ind} -- stop_ind={stop_ind}")
Binary file not shown.
@@ -0,0 +1,97 @@
"""
Load and save dataset example
=============================

In this example, we show how to load and save braindecode datasets.
"""

# Authors: Lukas Gemein <[email protected]>
#
# License: BSD (3-clause)

import tempfile

from braindecode.datasets import MOABBDataset
from braindecode.preprocessing import preprocess, Preprocessor
from braindecode.datautil import load_concat_dataset
from braindecode.preprocessing import create_windows_from_events


###############################################################################
# First, we load a dataset using MOABB.
dataset = MOABBDataset(
    dataset_name='BNCI2014001',
    subject_ids=[1],
)

###############################################################################
# We can apply preprocessing steps to the dataset. It is also possible to skip
# this step and not apply any preprocessing.
preprocess(
    concat_ds=dataset,
    preprocessors=[Preprocessor(fn='resample', sfreq=10)]
)
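
# An illustrative aside, not part of the archived example: several
# preprocessing steps can be chained by passing more Preprocessor objects in
# the list. 'filter' below is a method of mne.io.Raw applied through the
# Preprocessor wrapper; the sketch is commented out because a 4-38 Hz
# band-pass must run *before* resampling to 10 Hz (Nyquist limit).
# preprocess(
#     concat_ds=dataset,
#     preprocessors=[
#         Preprocessor(fn='filter', l_freq=4., h_freq=38.),  # band-pass first
#         Preprocessor(fn='resample', sfreq=100),            # then resample
#     ],
# )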

###############################################################################
# We save the dataset to an existing directory. It will create a '.fif' file
# for every dataset in the concat dataset. Additionally, it will create two
# JSON files: the first holds the description of the dataset, the second the
# name of the target. If you want to store to the same directory several
# times, for example when trying different preprocessing, you can choose to
# overwrite the existing files.

tmpdir = tempfile.mkdtemp()  # write in a temporary directory
dataset.save(
    path=tmpdir,
    overwrite=False,
)

##############################################################################
# We load the saved dataset from a directory. Signals can be preloaded, as
# with mne. Optionally, only specific '.fif' files can be loaded by
# specifying their ids. The target name can be changed if the dataset
# supports it (TUHAbnormal, for example, supports 'pathological', 'age', and
# 'gender'; if you stored a preprocessed version with target 'pathological',
# it is possible to change the target upon loading).
dataset_loaded = load_concat_dataset(
    path=tmpdir,
    preload=True,
    ids_to_load=[1, 3],
    target_name=None,
)
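
# A hedged aside (illustration only, not part of the archived example): for a
# dataset that supports several targets, such as TUHAbnormal, the target could
# be switched at load time. The path below is hypothetical.
# tuh_loaded = load_concat_dataset(
#     path='/data/tuh_abnormal_preprocessed',  # hypothetical path
#     preload=False,
#     target_name='age',  # e.g. switch from 'pathological' to 'age'
# )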

##############################################################################
# The serialization utility also supports WindowsDatasets, so we create
# compute windows next.
windows_dataset = create_windows_from_events(
    concat_ds=dataset_loaded,
    trial_start_offset_samples=0,
    trial_stop_offset_samples=0,
)

windows_dataset.description

##############################################################################
# Again, we save the dataset to an existing directory. It will create a
# '-epo.fif' file for every dataset in the concat dataset. Additionally, it
# will create a JSON file holding the description of the dataset. If you
# want to store to the same directory several times, for example when trying
# different windowing parameters, you can choose to overwrite the existing
# files.
windows_dataset.save(
    path=tmpdir,
    overwrite=True,
)

##############################################################################
# Load the saved dataset from a directory. Signals can be preloaded, as with
# mne. Optionally, only specific '-epo.fif' files can be loaded by specifying
# their ids.
windows_dataset_loaded = load_concat_dataset(
    path=tmpdir,
    preload=False,
    ids_to_load=[0],
    target_name=None,
)

windows_dataset_loaded.description
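
##############################################################################
# An addition for illustration (not part of the archived example): the save
# and load round trips above leave plain files in tmpdir, so the standard
# library alone is enough to inspect what was written.
import os

for fname in sorted(os.listdir(tmpdir)):
    print(fname)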

0 commit comments
