Commit 7208601 (1 parent: 62f69e8)

119 files changed: +2194 additions, −2484 deletions
Binary file not shown.

master/_downloads/1224801107d6c3b138edf8ee33de0561/plot_bcic_iv_2a_moabb_cropped.ipynb

Lines changed: 4 additions & 15 deletions
@@ -51,18 +51,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"## Create model and compute windowing parameters\n\n\n"
-]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {
-"collapsed": false
-},
-"outputs": [],
-"source": [
-"# In contrast to trialwise decoding, we first have to create the model\n# before we can cut the dataset into windows. This is because we need to\n# know the receptive field of the network to know how large the window\n# stride should be.\n#"
+"## Create model and compute windowing parameters\n\nIn contrast to trialwise decoding, we first have to create the model\nbefore we can cut the dataset into windows. This is because we need to\nknow the receptive field of the network to know how large the window\nstride should be.\n\n\n"
 ]
 },
 {
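The merged markdown cell above carries the key point: the model must exist before windowing because its receptive field determines the window stride. A minimal sketch of that step, assuming the braindecode API this tutorial relies on (`ShallowFBCSPNet`, `to_dense_prediction_model`, `get_output_shape`); the channel count, class count, and window length are illustrative, not taken from this commit:

```python
from braindecode.models import ShallowFBCSPNet
from braindecode.models.util import get_output_shape, to_dense_prediction_model

n_chans = 22                  # BCI IV 2a recordings have 22 EEG channels
n_classes = 4                 # four motor-imagery classes
input_window_samples = 1000   # illustrative window length

model = ShallowFBCSPNet(
    n_chans,
    n_classes,
    input_window_samples=input_window_samples,
    final_conv_length=30,
)

# Convert the classifier into a dense-prediction model, then read off how
# many predictions a single input window yields; that count is what the
# windowing step below uses as the window stride.
to_dense_prediction_model(model)
n_preds_per_input = get_output_shape(model, n_chans, input_window_samples)[2]
```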
@@ -134,7 +123,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"Cut the data into windows\n -------------------------\n#####################################################################\n In contrast to trialwise decoding, we have to supply an explicit window size and\n window stride to the ``create_windows_from_events`` function.\n\n\n"
+"## Cut the data into windows\nIn contrast to trialwise decoding, we have to supply an explicit\nwindow size and window stride to the ``create_windows_from_events``\nfunction.\n\n\n"
 ]
 },
 {
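For reference, the explicit-window call the reworded cell points to might look like the sketch below; `dataset`, `input_window_samples`, and `n_preds_per_input` are assumed from earlier steps of the tutorial, and the start offset is illustrative:

```python
from braindecode.preprocessing import create_windows_from_events

windows_dataset = create_windows_from_events(
    dataset,                              # assumed: the preprocessed MOABB dataset
    trial_start_offset_samples=-125,      # illustrative: 0.5 s before the cue at 250 Hz
    trial_stop_offset_samples=0,
    window_size_samples=input_window_samples,
    window_stride_samples=n_preds_per_input,  # stride from the model's receptive field
    drop_last_window=False,
)
```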
@@ -188,7 +177,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"Plot Results\n ------------\n\n#####################################################################\n\n"
+"## Plot Results\nThis is again the same code as in trialwise decoding.\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n<div class=\"alert alert-info\"><h4>Note</h4><p>Note that we drop further in the classification error and\n    loss as in the trialwise decoding tutorial.</p></div>\n\n\n"
 ]
 },
 {
@@ -199,7 +188,7 @@
 },
 "outputs": [],
 "source": [
-"# This is again the same code as in trialwise decoding.\n#\n# .. note::\n#\n#    Note that we drop further in the classification error and\n#    loss as in the trialwise decoding tutorial.\n#\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport pandas as pd\n\n# Extract loss and accuracy values for plotting from history object\nresults_columns = ['train_loss', 'valid_loss', 'train_accuracy', 'valid_accuracy']\ndf = pd.DataFrame(clf.history[:, results_columns], columns=results_columns,\n                  index=clf.history[:, 'epoch'])\n\n# get percent of misclass for better visual comparison to loss\ndf = df.assign(train_misclass=100 - 100 * df.train_accuracy,\n               valid_misclass=100 - 100 * df.valid_accuracy)\n\nplt.style.use('seaborn')\nfig, ax1 = plt.subplots(figsize=(8, 3))\ndf.loc[:, ['train_loss', 'valid_loss']].plot(\n    ax=ax1, style=['-', ':'], marker='o', color='tab:blue', legend=False, fontsize=14)\n\nax1.tick_params(axis='y', labelcolor='tab:blue', labelsize=14)\nax1.set_ylabel(\"Loss\", color='tab:blue', fontsize=14)\n\nax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis\n\ndf.loc[:, ['train_misclass', 'valid_misclass']].plot(\n    ax=ax2, style=['-', ':'], marker='o', color='tab:red', legend=False)\nax2.tick_params(axis='y', labelcolor='tab:red', labelsize=14)\nax2.set_ylabel(\"Misclassification Rate [%]\", color='tab:red', fontsize=14)\nax2.set_ylim(ax2.get_ylim()[0], 85)  # make some room for legend\nax1.set_xlabel(\"Epoch\", fontsize=14)\n\n# where some data has already been plotted to ax\nhandles = []\nhandles.append(Line2D([0], [0], color='black', linewidth=1, linestyle='-', label='Train'))\nhandles.append(Line2D([0], [0], color='black', linewidth=1, linestyle=':', label='Valid'))\nplt.legend(handles, [h.get_label() for h in handles], fontsize=14)\nplt.tight_layout()"
+"import matplotlib.pyplot as plt\nfrom matplotlib.lines import Line2D\nimport pandas as pd\n\n# Extract loss and accuracy values for plotting from history object\nresults_columns = ['train_loss', 'valid_loss', 'train_accuracy', 'valid_accuracy']\ndf = pd.DataFrame(clf.history[:, results_columns], columns=results_columns,\n                  index=clf.history[:, 'epoch'])\n\n# get percent of misclass for better visual comparison to loss\ndf = df.assign(train_misclass=100 - 100 * df.train_accuracy,\n               valid_misclass=100 - 100 * df.valid_accuracy)\n\nplt.style.use('seaborn')\nfig, ax1 = plt.subplots(figsize=(8, 3))\ndf.loc[:, ['train_loss', 'valid_loss']].plot(\n    ax=ax1, style=['-', ':'], marker='o', color='tab:blue', legend=False, fontsize=14)\n\nax1.tick_params(axis='y', labelcolor='tab:blue', labelsize=14)\nax1.set_ylabel(\"Loss\", color='tab:blue', fontsize=14)\n\nax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis\n\ndf.loc[:, ['train_misclass', 'valid_misclass']].plot(\n    ax=ax2, style=['-', ':'], marker='o', color='tab:red', legend=False)\nax2.tick_params(axis='y', labelcolor='tab:red', labelsize=14)\nax2.set_ylabel(\"Misclassification Rate [%]\", color='tab:red', fontsize=14)\nax2.set_ylim(ax2.get_ylim()[0], 85)  # make some room for legend\nax1.set_xlabel(\"Epoch\", fontsize=14)\n\n# where some data has already been plotted to ax\nhandles = []\nhandles.append(Line2D([0], [0], color='black', linewidth=1, linestyle='-', label='Train'))\nhandles.append(Line2D([0], [0], color='black', linewidth=1, linestyle=':', label='Valid'))\nplt.legend(handles, [h.get_label() for h in handles], fontsize=14)\nplt.tight_layout()"
 ]
 },
 {

master/_downloads/3e0455c88adcaa2571230abb981020db/plot_relative_positioning.ipynb

Lines changed: 1 addition & 1 deletion
@@ -159,7 +159,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"## Training\n\nWe can now train our network on the pretext task. We use similar\nhyperparameters as in [1]_, but reduce the number of epochs and increase the\nlearning rate to account for the smaller setting of this example.\n\n"
+"## Training\n\nWe can now train our network on the pretext task. We use similar\nhyperparameters as in [1]_, but reduce the number of epochs and\nincrease the learning rate to account for the smaller setting of\nthis example.\n\n"
 ]
 },
 {
Binary file not shown.

master/_downloads/995f58ef1b3d29dce64caee24e5bf457/plot_bcic_iv_2a_moabb_cropped.py

Lines changed: 5 additions & 11 deletions
@@ -106,9 +106,6 @@
 # Create model and compute windowing parameters
 # ---------------------------------------------
 #
-
-######################################################################
-
 # In contrast to trialwise decoding, we first have to create the model
 # before we can cut the dataset into windows. This is because we need to
 # know the receptive field of the network to know how large the window
@@ -191,9 +188,9 @@
 ######################################################################
 # Cut the data into windows
 # -------------------------
-######################################################################
-# In contrast to trialwise decoding, we have to supply an explicit window size and
-# window stride to the ``create_windows_from_events`` function.
+# In contrast to trialwise decoding, we have to supply an explicit
+# window size and window stride to the ``create_windows_from_events``
+# function.
 #
 
 from braindecode.preprocessing import create_windows_from_events
@@ -289,12 +286,9 @@
 
 ######################################################################
 # Plot Results
-# ------------
-#
-######################################################################
-
+# ----------------
 # This is again the same code as in trialwise decoding.
-#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 # .. note::
 #
 #    Note that we drop further in the classification error and

master/_downloads/9f3a9058fba292cd75e5ce23b492bc0c/plot_data_augmentation_search.py

Lines changed: 15 additions & 16 deletions
@@ -4,7 +4,7 @@
 
 This tutorial shows how to search data augmentations using braindecode.
 Indeed, it is known that the best augmentation to use often dependent on the task
-or phenomenon studied. Here we follow the methodology proposed in [1]_ on the
+or phenomenon studied. Here we follow the methodology proposed in [1] on the
 openly available BCI IV 2a Dataset.
 
 
@@ -25,6 +25,17 @@
 Here, we use the augmentation module present in braindecode in the context of
 trialwise decoding with the BCI IV 2a dataset.
 
+References
+-----------
+
+.. [1] Rommel, C., Paillard, J., Moreau, T., & Gramfort, A. (2022)
+   Data augmentation for learning predictive models on EEG:
+   a systematic comparison. https://arxiv.org/abs/2206.14483
+
+.. [2] Banville, H., Chehab, O., Hyvärinen, A., Engemann, D. A., & Gramfort, A. (2021).
+   Uncovering the structure of clinical EEG signals with self-supervised learning.
+   Journal of Neural Engineering, 18(4), 046020.
+
 .. contents:: This example covers:
    :local:
    :depth: 2
@@ -109,7 +120,7 @@
 
 ######################################################################
 # Defining a list of transforms
-# --------------------
+# ------------------------------
 #
 # In this tutorial, we will use three categories of augmentations.
 # This categorization has been proposed by [1]_ to explain and aggregate
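For readers following along, a hypothetical transforms list with one braindecode augmentation per category (frequency, time, spatial); the probabilities and magnitudes here are illustrative placeholders, not the tutorial's tuned values:

```python
from braindecode.augmentation import ChannelsDropout, FrequencyShift, SmoothTimeMask

sfreq = 250  # BCI IV 2a sampling frequency

transforms = [
    FrequencyShift(probability=0.5, sfreq=sfreq, max_delta_freq=2.0),  # frequency-domain
    SmoothTimeMask(probability=0.5, mask_len_samples=100),             # time-domain
    ChannelsDropout(probability=0.5, p_drop=0.2),                      # spatial
]
```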
@@ -199,7 +210,7 @@
 if cuda:
     model.cuda()
 
-######################################################################
+##########################################################################
 # The model is now trained as in the trial-wise example. The
 # ``AugmentedDataLoader`` is used as the train iterator and the list of
 # transforms are passed as arguments.
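A sketch of that wiring, assuming `model` and `transforms` from the steps above; the criterion, optimizer, and batch size are placeholders rather than the tutorial's exact settings:

```python
import torch

from braindecode import EEGClassifier
from braindecode.augmentation import AugmentedDataLoader

clf = EEGClassifier(
    model,
    iterator_train=AugmentedDataLoader,     # training batches are augmented on the fly
    iterator_train__transforms=transforms,  # the transforms list defined earlier
    criterion=torch.nn.NLLLoss,
    optimizer=torch.optim.AdamW,
    train_split=None,                       # splitting is handled by the search below
    batch_size=64,
    device='cuda' if torch.cuda.is_available() else 'cpu',
)
```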
@@ -233,7 +244,7 @@
 # generator of the training.
 
 train_X = SliceDataset(train_set, idx=0)
-train_y = array([y for y in SliceDataset(train_set, idx=1)])
+train_y = array(list(SliceDataset(train_set, idx=1)))
 
 #######################################################################
 # Given the trialwise approach, here we use the KFold approach and
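The truncated comment gestures at a KFold cross-validation over candidate augmentations. One plausible shape for that search, with the grid contents and fold count as assumptions rather than the tutorial's choices:

```python
from sklearn.model_selection import GridSearchCV, KFold

# Hypothetical grid: each candidate trains the classifier with a single
# augmentation from the list defined above.
param_grid = {'iterator_train__transforms': [[t] for t in transforms]}

search = GridSearchCV(
    estimator=clf,
    param_grid=param_grid,
    cv=KFold(n_splits=3, shuffle=False),  # no shuffle: keep EEG windows ordered
    return_train_score=True,
    refit=True,
)
search.fit(train_X, train_y)
```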
@@ -290,15 +301,3 @@
 eval_y = SliceDataset(eval_set, idx=1)
 score = search.score(eval_X, eval_y)
 print(f'Eval accuracy is {score * 100:.2f}%.')
-
-# References
-# ----------
-#
-# .. [1] Rommel, C., Paillard, J., Moreau, T., & Gramfort, A. (2022)
-#    Data augmentation for learning predictive models on EEG:
-#    a systematic comparison.
-#    https://arxiv.org/abs/2206.14483
-#
-# .. [2] Banville, H., Chehab, O., Hyvärinen, A., Engemann, D. A., & Gramfort, A. (2021).
-#    Uncovering the structure of clinical EEG signals with self-supervised learning.
-#    Journal of Neural Engineering, 18(4), 046020.

master/_downloads/a90627a2f09081df5abbe979f9fa0217/plot_data_augmentation_search.ipynb

Lines changed: 3 additions & 3 deletions
@@ -15,7 +15,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"\n# Searching the best data augmentation on BCIC IV 2a Dataset\n\nThis tutorial shows how to search data augmentations using braindecode.\nIndeed, it is known that the best augmentation to use often dependent on the task\nor phenomenon studied. Here we follow the methodology proposed in [1]_ on the\nopenly available BCI IV 2a Dataset.\n\n\n.. topic:: Data Augmentation\n\n   Data augmentation could be a step in training deep learning models.\n   For decoding brain signals, recent studies have shown that artificially\n   generating samples may increase the final performance of a deep learning model [1]_.\n   Other studies have shown that data augmentation can be used to cast\n   a self-supervised paradigm, presenting a more diverse\n   view of the data, both with pretext tasks and contrastive learning [2]_.\n\n\nBoth approaches demand an intense comparison to find the best fit with the data.\nThis view is supported by Rommel, C., Paillard, J., Moreau, T., & Gramfort, A. (2022),\nwho demonstrate the importance of the selection the right transformation and\nstrength for each different type of task considered.\nHere, we use the augmentation module present in braindecode in the context of\ntrialwise decoding with the BCI IV 2a dataset.\n   :depth: 2\n"
+"\n# Searching the best data augmentation on BCIC IV 2a Dataset\n\nThis tutorial shows how to search data augmentations using braindecode.\nIndeed, it is known that the best augmentation to use often dependent on the task\nor phenomenon studied. Here we follow the methodology proposed in [1] on the\nopenly available BCI IV 2a Dataset.\n\n\n.. topic:: Data Augmentation\n\n   Data augmentation could be a step in training deep learning models.\n   For decoding brain signals, recent studies have shown that artificially\n   generating samples may increase the final performance of a deep learning model [1]_.\n   Other studies have shown that data augmentation can be used to cast\n   a self-supervised paradigm, presenting a more diverse\n   view of the data, both with pretext tasks and contrastive learning [2]_.\n\n\nBoth approaches demand an intense comparison to find the best fit with the data.\nThis view is supported by Rommel, C., Paillard, J., Moreau, T., & Gramfort, A. (2022),\nwho demonstrate the importance of the selection the right transformation and\nstrength for each different type of task considered.\nHere, we use the augmentation module present in braindecode in the context of\ntrialwise decoding with the BCI IV 2a dataset.\n\n## References\n\n.. [1] Rommel, C., Paillard, J., Moreau, T., & Gramfort, A. (2022)\n   Data augmentation for learning predictive models on EEG:\n   a systematic comparison. https://arxiv.org/abs/2206.14483\n\n.. [2] Banville, H., Chehab, O., Hyv\u00e4rinen, A., Engemann, D. A., & Gramfort, A. (2021).\n   Uncovering the structure of clinical EEG signals with self-supervised learning.\n   Journal of Neural Engineering, 18(4), 046020.\n   :depth: 2\n"
 ]
 },
 {
@@ -188,7 +188,7 @@
 },
 "outputs": [],
 "source": [
-"train_X = SliceDataset(train_set, idx=0)\ntrain_y = array([y for y in SliceDataset(train_set, idx=1)])"
+"train_X = SliceDataset(train_set, idx=0)\ntrain_y = array(list(SliceDataset(train_set, idx=1)))"
 ]
 },
 {
@@ -224,7 +224,7 @@
 },
 "outputs": [],
 "source": [
-"import pandas as pd\nimport numpy as np\n\nsearch_results = pd.DataFrame(search.cv_results_)\n\nbest_run = search_results[search_results['rank_test_score'] == 1].squeeze()\nbest_aug = best_run['params']\nvalidation_score = np.around(best_run['mean_test_score'] * 100, 2).mean()\ntraining_score = np.around(best_run['mean_train_score'] * 100, 2).mean()\n\nreport_message = 'Best augmentation is saved in best_aug which gave a mean validation accuracy' + \\\n    'of {}% (train accuracy of {}%).'.format(validation_score, training_score)\n\nprint(report_message)\n\neval_X = SliceDataset(eval_set, idx=0)\neval_y = SliceDataset(eval_set, idx=1)\nscore = search.score(eval_X, eval_y)\nprint(f'Eval accuracy is {score * 100:.2f}%.')\n\n# References\n# ----------\n#\n# .. [1] Rommel, C., Paillard, J., Moreau, T., & Gramfort, A. (2022)\n#    Data augmentation for learning predictive models on EEG:\n#    a systematic comparison.\n#    https://arxiv.org/abs/2206.14483\n#\n# .. [2] Banville, H., Chehab, O., Hyv\u00e4rinen, A., Engemann, D. A., & Gramfort, A. (2021).\n#    Uncovering the structure of clinical EEG signals with self-supervised learning.\n#    Journal of Neural Engineering, 18(4), 046020."
+"import pandas as pd\nimport numpy as np\n\nsearch_results = pd.DataFrame(search.cv_results_)\n\nbest_run = search_results[search_results['rank_test_score'] == 1].squeeze()\nbest_aug = best_run['params']\nvalidation_score = np.around(best_run['mean_test_score'] * 100, 2).mean()\ntraining_score = np.around(best_run['mean_train_score'] * 100, 2).mean()\n\nreport_message = 'Best augmentation is saved in best_aug which gave a mean validation accuracy' + \\\n    'of {}% (train accuracy of {}%).'.format(validation_score, training_score)\n\nprint(report_message)\n\neval_X = SliceDataset(eval_set, idx=0)\neval_y = SliceDataset(eval_set, idx=1)\nscore = search.score(eval_X, eval_y)\nprint(f'Eval accuracy is {score * 100:.2f}%.')"
 ]
 }
 ],

master/_downloads/e21937efe066fc624a4b536dced68cd1/plot_relative_positioning.py

Lines changed: 4 additions & 3 deletions
@@ -293,11 +293,12 @@ def forward(self, x):
 
 ######################################################################
 # Training
-# --------
+# ---------
 #
 # We can now train our network on the pretext task. We use similar
-# hyperparameters as in [1]_, but reduce the number of epochs and increase the
-# learning rate to account for the smaller setting of this example.
+# hyperparameters as in [1]_, but reduce the number of epochs and
+# increase the learning rate to account for the smaller setting of
+# this example.
 
 import os
 