From f6ca13886d28d673a2f8bc70012b4eaf35d518fd Mon Sep 17 00:00:00 2001 From: Yaroslav Halchenko Date: Wed, 30 Aug 2023 14:08:31 -0400 Subject: [PATCH] [DATALAD RUNCMD] run codespell throughout fixing typo automagically === Do not change lines below === { "chain": [], "cmd": "codespell -w", "exit": 0, "extra_inputs": [], "inputs": [], "outputs": [], "pwd": "." } ^^^ Do not change lines above ^^^ --- projects/ECoG/exploreAJILE12.ipynb | 4 ++-- projects/ECoG/load_ECoG_motor_imagery.ipynb | 2 +- projects/behavior/Loading_CalMS21_data.ipynb | 2 +- projects/docs/project_guidance.md | 4 ++-- .../fMRI/load_bonner_navigational_affordances.ipynb | 4 ++-- projects/fMRI/load_cichy_fMRI_MEG.ipynb | 4 ++-- projects/fMRI/load_fslcourse.ipynb | 2 +- projects/fMRI/load_hcp.ipynb | 2 +- projects/fMRI/load_hcp_retino.ipynb | 4 ++-- projects/fMRI/load_hcp_task.ipynb | 2 +- projects/fMRI/load_hcp_task_with_behaviour.ipynb | 2 +- projects/modelingsteps/ModelingSteps_1through4.ipynb | 2 +- projects/modelingsteps/ModelingSteps_5through10.ipynb | 6 +++--- projects/modelingsteps/TrainIllusionModel.ipynb | 4 ++-- .../neurons/load_Allen_Visual_Behavior_from_SDK.ipynb | 8 ++++---- ...Allen_Visual_Behavior_from_pre_processed_file.ipynb | 2 +- projects/neurons/load_steinmetz_extra.ipynb | 2 +- projects/neurons/load_stringer_orientations.ipynb | 2 +- projects/theory/motor_RNNs.ipynb | 4 ++-- tutorials/Bonus_Autoencoders/Bonus_Tutorial1.ipynb | 2 +- tutorials/Bonus_Autoencoders/Bonus_Tutorial2.ipynb | 2 +- tutorials/Bonus_Autoencoders/Bonus_Tutorial3.ipynb | 2 +- .../instructor/Bonus_Tutorial1.ipynb | 2 +- .../instructor/Bonus_Tutorial2.ipynb | 2 +- .../instructor/Bonus_Tutorial3.ipynb | 2 +- .../Bonus_Autoencoders/student/Bonus_Tutorial1.ipynb | 2 +- .../Bonus_Autoencoders/student/Bonus_Tutorial2.ipynb | 2 +- .../Bonus_Autoencoders/student/Bonus_Tutorial3.ipynb | 2 +- tutorials/W1D2_ModelFitting/W1D2_Tutorial4.ipynb | 2 +- .../W1D2_ModelFitting/instructor/W1D2_Tutorial4.ipynb | 2 +- 
.../W1D2_ModelFitting/student/W1D2_Tutorial4.ipynb | 2 +- .../W1D3_GeneralizedLinearModels/W1D3_Tutorial1.ipynb | 4 ++-- .../W1D3_GeneralizedLinearModels/W1D3_Tutorial2.ipynb | 2 +- .../instructor/W1D3_Tutorial1.ipynb | 4 ++-- .../instructor/W1D3_Tutorial2.ipynb | 2 +- .../solutions/W1D3_Tutorial1_Solution_0d56b394.py | 2 +- .../student/W1D3_Tutorial1.ipynb | 2 +- .../student/W1D3_Tutorial2.ipynb | 2 +- tutorials/W1D5_DeepLearning/W1D5_Tutorial1.ipynb | 2 +- tutorials/W1D5_DeepLearning/W1D5_Tutorial2.ipynb | 2 +- tutorials/W1D5_DeepLearning/W1D5_Tutorial3.ipynb | 2 +- tutorials/W1D5_DeepLearning/W1D5_Tutorial4.ipynb | 4 ++-- .../W1D5_DeepLearning/instructor/W1D5_Tutorial1.ipynb | 2 +- .../W1D5_DeepLearning/instructor/W1D5_Tutorial2.ipynb | 2 +- .../W1D5_DeepLearning/instructor/W1D5_Tutorial3.ipynb | 2 +- .../W1D5_DeepLearning/instructor/W1D5_Tutorial4.ipynb | 4 ++-- .../W1D5_DeepLearning/student/W1D5_Tutorial1.ipynb | 2 +- .../W1D5_DeepLearning/student/W1D5_Tutorial2.ipynb | 2 +- .../W1D5_DeepLearning/student/W1D5_Tutorial3.ipynb | 2 +- .../W1D5_DeepLearning/student/W1D5_Tutorial4.ipynb | 4 ++-- tutorials/W2D1_ModelingPractice/W2D1_Tutorial1.ipynb | 2 +- .../instructor/W2D1_Tutorial1.ipynb | 2 +- .../W2D1_ModelingPractice/student/W2D1_Tutorial1.ipynb | 2 +- tutorials/W3D1_BayesianDecisions/W3D1_Tutorial1.ipynb | 4 ++-- tutorials/W3D1_BayesianDecisions/W3D1_Tutorial2.ipynb | 8 ++++---- .../instructor/W3D1_Tutorial1.ipynb | 4 ++-- .../instructor/W3D1_Tutorial2.ipynb | 8 ++++---- .../student/W3D1_Tutorial1.ipynb | 4 ++-- .../student/W3D1_Tutorial2.ipynb | 8 ++++---- tutorials/W3D2_HiddenDynamics/W3D2_Tutorial1.ipynb | 2 +- tutorials/W3D2_HiddenDynamics/W3D2_Tutorial2.ipynb | 2 +- tutorials/W3D2_HiddenDynamics/W3D2_Tutorial3.ipynb | 4 ++-- tutorials/W3D2_HiddenDynamics/W3D2_Tutorial4.ipynb | 4 ++-- .../instructor/W3D2_Tutorial1.ipynb | 2 +- .../instructor/W3D2_Tutorial2.ipynb | 2 +- .../instructor/W3D2_Tutorial3.ipynb | 4 ++-- .../instructor/W3D2_Tutorial4.ipynb | 4 
++-- .../solutions/W3D2_Tutorial3_Solution_6c26b2f4.py | 2 +- .../W3D2_HiddenDynamics/student/W3D2_Tutorial1.ipynb | 2 +- .../W3D2_HiddenDynamics/student/W3D2_Tutorial2.ipynb | 2 +- .../W3D2_HiddenDynamics/student/W3D2_Tutorial3.ipynb | 2 +- .../W3D2_HiddenDynamics/student/W3D2_Tutorial4.ipynb | 4 ++-- tutorials/W3D3_OptimalControl/W3D3_Tutorial1.ipynb | 10 +++++----- tutorials/W3D3_OptimalControl/W3D3_Tutorial2.ipynb | 8 ++++---- .../instructor/W3D3_Tutorial1.ipynb | 10 +++++----- .../instructor/W3D3_Tutorial2.ipynb | 8 ++++---- .../W3D3_OptimalControl/student/W3D3_Tutorial1.ipynb | 10 +++++----- .../W3D3_OptimalControl/student/W3D3_Tutorial2.ipynb | 8 ++++---- .../W3D4_ReinforcementLearning/W3D4_Tutorial1.ipynb | 2 +- .../W3D4_ReinforcementLearning/W3D4_Tutorial3.ipynb | 2 +- .../W3D4_ReinforcementLearning/W3D4_Tutorial4.ipynb | 2 +- .../instructor/W3D4_Tutorial1.ipynb | 2 +- .../instructor/W3D4_Tutorial3.ipynb | 2 +- .../instructor/W3D4_Tutorial4.ipynb | 2 +- .../student/W3D4_Tutorial1.ipynb | 2 +- .../student/W3D4_Tutorial3.ipynb | 2 +- .../student/W3D4_Tutorial4.ipynb | 2 +- tutorials/W3D5_NetworkCausality/W3D5_Tutorial3.ipynb | 2 +- tutorials/W3D5_NetworkCausality/W3D5_Tutorial4.ipynb | 4 ++-- .../instructor/W3D5_Tutorial3.ipynb | 2 +- .../instructor/W3D5_Tutorial4.ipynb | 4 ++-- .../W3D5_NetworkCausality/student/W3D5_Tutorial3.ipynb | 2 +- .../W3D5_NetworkCausality/student/W3D5_Tutorial4.ipynb | 4 ++-- 93 files changed, 151 insertions(+), 151 deletions(-) diff --git a/projects/ECoG/exploreAJILE12.ipynb b/projects/ECoG/exploreAJILE12.ipynb index 28f3b88bdb..0fe69540d8 100644 --- a/projects/ECoG/exploreAJILE12.ipynb +++ b/projects/ECoG/exploreAJILE12.ipynb @@ -298,7 +298,7 @@ }, "source": [ "### Access to data on cloud\n", - "The data is hosted on [AMAZON AWS](https://aws.amazon.com) in **S3** buckets. The following steps guide you to locate the data based on the **dandiset** information, setup streaming and reading the data from the cloud. 
Alternatively, you can access the data on **[DANDI](https://dandiarchive.org/dandiset/000055?search=ajile12&pos=1)**. If you choose to directly download from DANDI, you will need a github account. The following code will be sufficient to programatically download/stream data (either for colab notebook or for your own personal machine)." + "The data is hosted on [AMAZON AWS](https://aws.amazon.com) in **S3** buckets. The following steps guide you to locate the data based on the **dandiset** information, setup streaming and reading the data from the cloud. Alternatively, you can access the data on **[DANDI](https://dandiarchive.org/dandiset/000055?search=ajile12&pos=1)**. If you choose to directly download from DANDI, you will need a github account. The following code will be sufficient to programmatically download/stream data (either for colab notebook or for your own personal machine)." ] }, { @@ -857,7 +857,7 @@ "execution": {} }, "source": [ - "Each subject has multiple experimental sessions. You can check that programatically." + "Each subject has multiple experimental sessions. You can check that programmatically." ] }, { diff --git a/projects/ECoG/load_ECoG_motor_imagery.ipynb b/projects/ECoG/load_ECoG_motor_imagery.ipynb index 41b1eaa8f6..7bd5a2cb31 100644 --- a/projects/ECoG/load_ECoG_motor_imagery.ipynb +++ b/projects/ECoG/load_ECoG_motor_imagery.ipynb @@ -127,7 +127,7 @@ "\n", "`dat1` and `dat2` are data from the two blocks performed in each subject. The first one was the actual movements, the second one was motor imagery. For the movement task, from the original dataset instructions:\n", "\n", - "*Patients performed simple, repetitive, motor tasks of hand (synchronous flexion and extension of all fingers, i.e., clenching and releasing a fist at a self-paced rate of ~1-2 Hz) or tongue (opening of mouth with protrusion and retraction of the tongue, i.e., sticking the tongue in and out, also at ~1-2 Hz). 
These movements were performed in an interval-based manner, alternating between movement and rest, and the side of move- ment was always contralateral to the side of cortical grid placement.*\n", + "*Patients performed simple, repetitive, motor tasks of hand (synchronous flexion and extension of all fingers, i.e., clenching and releasing a fist at a self-paced rate of ~1-2 Hz) or tongue (opening of mouth with protrusion and retraction of the tongue, i.e., sticking the tongue in and out, also at ~1-2 Hz). These movements were performed in an interval-based manner, alternating between movement and rest, and the side of movement was always contralateral to the side of cortical grid placement.*\n", "
\n", "\n", diff --git a/projects/behavior/Loading_CalMS21_data.ipynb b/projects/behavior/Loading_CalMS21_data.ipynb index 106a770af3..49931835db 100644 --- a/projects/behavior/Loading_CalMS21_data.ipynb +++ b/projects/behavior/Loading_CalMS21_data.ipynb @@ -42845,7 +42845,7 @@ " stop_frame=5100,\n", " annotation_sequence=annotation_sequence)\n", "\n", - "# Display the animaion on colab\n", + "# Display the animation on colab\n", "ani" ] }, diff --git a/projects/docs/project_guidance.md b/projects/docs/project_guidance.md index d3ec2bf3c3..b57854e2b2 100644 --- a/projects/docs/project_guidance.md +++ b/projects/docs/project_guidance.md @@ -84,7 +84,7 @@ We have designed tutorials to help launch your projects. Once you're done with t (2h) Complete the intro/tutorial/outro for this day * You will need to use your group's project for some of this content. If you don’t have concrete ideas yet, or you haven’t done a research project before, use one of the provided project templates to walk through the four steps. * If you are using a project template, your goal is to translate the information from the slide and colab notebook into a 4-step format. Some information might not be readily available in the slide or notebook, and you might have to find it in your literature review later this day. -* Try to write down a few sentences for each of the four steps applied to your project. You will re-use these in your proposal later today. +* Try to write down a few sentences for each of the four steps applied to your project. You will reuse these in your proposal later today. (2.5h) Literature review: identify interesting papers The goal of this literature review is to situate your question in context and help you acquire some keywords that you will use in your proposal today. 
@@ -95,7 +95,7 @@ The goal of this literature review is to situate your question in context and he Project block task: (3h) Project proposal -* Try to write a proposal for this project based on the way you understand it now. This should re-use some of the text you wrote down for the four steps, and should include keywords and concepts that you identified in your literature review. Don’t worry too much about the structure of this paragraph! The goal is to get as many words (200-300) on paper as possible. You have the entire day 10 to learn how to write a properly structured scientific abstract. +* Try to write a proposal for this project based on the way you understand it now. This should reuse some of the text you wrote down for the four steps, and should include keywords and concepts that you identified in your literature review. Don’t worry too much about the structure of this paragraph! The goal is to get as many words (200-300) on paper as possible. You have the entire day 10 to learn how to write a properly structured scientific abstract. * It is important to include the concepts which you identified as relevant, and the keywords that go with them. * When you are ready, please submit your proposal [here](https://airtable.com/shrcYuFYMPh4jGIng). This is not mandatory and can be submitted at any time. We won't evaluate this, but we will use it to keep track of the overall progress of the groups. diff --git a/projects/fMRI/load_bonner_navigational_affordances.ipynb b/projects/fMRI/load_bonner_navigational_affordances.ipynb index 4242e8acbb..c4260a5310 100644 --- a/projects/fMRI/load_bonner_navigational_affordances.ipynb +++ b/projects/fMRI/load_bonner_navigational_affordances.ipynb @@ -340,7 +340,7 @@ "execution": {} }, "source": [ - "`Trajs.mat` contain data on the trajectories drawn by subjects during the evaluation phase before main experiment. 
The data is organised like `[n_images, heigth, width, n_evaluators]` There is a data on 173 images, of which 50 were presented to the participants. The filenames are stored as `dtype`.\n" + "`Trajs.mat` contain data on the trajectories drawn by subjects during the evaluation phase before main experiment. The data is organised like `[n_images, height, width, n_evaluators]` There is a data on 173 images, of which 50 were presented to the participants. The filenames are stored as `dtype`.\n" ] }, { @@ -364,7 +364,7 @@ ], "source": [ "trajs = loadmat('affordances/Trajs.mat')['Trajs']\n", - "fnames = trajs.dtype.names # filenames get loaded as custom dtypes due and type of array is initialy np.void due to peculiarites of how it was saved in Matlab.\n", + "fnames = trajs.dtype.names # filenames get loaded as custom dtypes due and type of array is initially np.void due to peculiarites of how it was saved in Matlab.\n", "trajs = np.asarray(trajs[0][0].tolist()) # turn np.void into float32\n", "trajs.shape" ] diff --git a/projects/fMRI/load_cichy_fMRI_MEG.ipynb b/projects/fMRI/load_cichy_fMRI_MEG.ipynb index 5d834bdeb3..a6cc4485a4 100644 --- a/projects/fMRI/load_cichy_fMRI_MEG.ipynb +++ b/projects/fMRI/load_cichy_fMRI_MEG.ipynb @@ -151,7 +151,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Downlading data...\n", + "Downloading data...\n", "Download completed!\n" ] } @@ -173,7 +173,7 @@ " if r.status_code != requests.codes.ok:\n", " print(\"!!! 
Failed to download data !!!\")\n", " else:\n", - " print(\"Downlading data...\")\n", + " print(\"Downloading data...\")\n", " with open(fname, \"wb\") as fid:\n", " fid.write(r.content)\n", " with zipfile.ZipFile(fname, 'r') as zip_ref:\n", diff --git a/projects/fMRI/load_fslcourse.ipynb b/projects/fMRI/load_fslcourse.ipynb index bafc0cbc54..4d62b0156e 100644 --- a/projects/fMRI/load_fslcourse.ipynb +++ b/projects/fMRI/load_fslcourse.ipynb @@ -371,7 +371,7 @@ "execution": {} }, "source": [ - "Next we will convolve ouur regressors with the HRF. This is because the FMRI signal is a sluggish blood signal that lags behind neural signal. " + "Next we will convolve our regressors with the HRF. This is because the FMRI signal is a sluggish blood signal that lags behind neural signal. " ] }, { diff --git a/projects/fMRI/load_hcp.ipynb b/projects/fMRI/load_hcp.ipynb index cd779a40a7..2d39b7703d 100644 --- a/projects/fMRI/load_hcp.ipynb +++ b/projects/fMRI/load_hcp.ipynb @@ -1266,7 +1266,7 @@ "outputs": [], "source": [ "task = \"motor\"\n", - "conditions = [\"lf\", \"rf\"] # Run a substraction analysis between two conditions\n", + "conditions = [\"lf\", \"rf\"] # Run a subtraction analysis between two conditions\n", "\n", "contrast = []\n", "for subject in subjects:\n", diff --git a/projects/fMRI/load_hcp_retino.ipynb b/projects/fMRI/load_hcp_retino.ipynb index 490b0dbdb4..78ebb26afe 100644 --- a/projects/fMRI/load_hcp_retino.ipynb +++ b/projects/fMRI/load_hcp_retino.ipynb @@ -25,7 +25,7 @@ "\n", "In order to use this dataset, please electronically sign the HCP data use terms at [ConnectomeDB](https://db.humanconnectome.org). Instructions for this are on pp. 
24-25 of the [HCP Reference Manual](https://www.humanconnectome.org/storage/app/media/documentation/s1200/HCP_S1200_Release_Reference_Manual.pdf).\n", "\n", - "The data and experiment are decribed in detail in [Benson et al.](https://jov.arvojournals.org/article.aspx?articleid=2719988#207329261)" + "The data and experiment are described in detail in [Benson et al.](https://jov.arvojournals.org/article.aspx?articleid=2719988#207329261)" ] }, { @@ -292,7 +292,7 @@ "source": [ "The design matrrix above is made of three columns. One for a cosine wave, one for a sine wave, and one constant columns. \n", "\n", - "The first two columns together can fit a sinusoid of aritrary phase. The last column will help fit the mean of the data.\n", + "The first two columns together can fit a sinusoid of arbitrary phase. The last column will help fit the mean of the data.\n", "\n", "This is a linear model of the form $y = M\\beta$ which we can invert using $\\hat{\\beta}=M^+y$ where $M^+$ is the pseudoinverse of $M$.\n" ] diff --git a/projects/fMRI/load_hcp_task.ipynb b/projects/fMRI/load_hcp_task.ipynb index e44164c939..e90e61bda5 100644 --- a/projects/fMRI/load_hcp_task.ipynb +++ b/projects/fMRI/load_hcp_task.ipynb @@ -638,7 +638,7 @@ "source": [ "# Visualising the results on a brain\n", "\n", - "Finally, we will visualise these resuts on the cortical surface of an average brain." + "Finally, we will visualise these results on the cortical surface of an average brain." ] }, { diff --git a/projects/fMRI/load_hcp_task_with_behaviour.ipynb b/projects/fMRI/load_hcp_task_with_behaviour.ipynb index 419749afdd..e18d5d1f7f 100644 --- a/projects/fMRI/load_hcp_task_with_behaviour.ipynb +++ b/projects/fMRI/load_hcp_task_with_behaviour.ipynb @@ -569,7 +569,7 @@ "source": [ "# Visualising the results on a brain\n", "\n", - "Finally, we will visualise these resuts on the cortical surface of an average brain." 
+ "Finally, we will visualise these results on the cortical surface of an average brain." ] }, { diff --git a/projects/modelingsteps/ModelingSteps_1through4.ipynb b/projects/modelingsteps/ModelingSteps_1through4.ipynb index 1c42311c91..60f8ebc026 100644 --- a/projects/modelingsteps/ModelingSteps_1through4.ipynb +++ b/projects/modelingsteps/ModelingSteps_1through4.ipynb @@ -1170,7 +1170,7 @@ "\n", "\n", "where *S* is the illusion strength and *N* is the noise level, and *k* is a free parameter.\n", - ">we could simply use the frequency of occurance across repetitions as the \"strength of the illusion\"\n", + ">we could simply use the frequency of occurrence across repetitions as the \"strength of the illusion\"\n", "\n", "We would get the noise as the standard deviation of *v(t)*, i.e.\n", "\n", diff --git a/projects/modelingsteps/ModelingSteps_5through10.ipynb b/projects/modelingsteps/ModelingSteps_5through10.ipynb index c47a8776b6..c3c3ce5e21 100644 --- a/projects/modelingsteps/ModelingSteps_5through10.ipynb +++ b/projects/modelingsteps/ModelingSteps_5through10.ipynb @@ -289,7 +289,7 @@ "* **outputs**: these are the predictions our model will make that you could portentially measure (e.g., in your idealized experiment)\n", "* **model functions**: A set of functions that perform the hypothesized computations.\n", "\n", - "You will thus need to define a set of functions that take your data and some parameters as input, can run your model, and output a prediction for a hypothetical measurment.\n", + "You will thus need to define a set of functions that take your data and some parameters as input, can run your model, and output a prediction for a hypothetical measurement.\n", "\n", "**Guiding principles**:\n", "* Keep it as simple as possible!\n", @@ -458,7 +458,7 @@ " - e.g., our intuition is really bad when it comes to dynamical systems\n", "\n", "4. 
Not using standard model testing tools\n", - " - each field has developped specific mathematical tools to test model behaviors. You'll be expected to show such evaluations. Make use of them early on!" + " - each field has developed specific mathematical tools to test model behaviors. You'll be expected to show such evaluations. Make use of them early on!" ] }, { @@ -844,7 +844,7 @@ "\n", "3. Thinking you don't need figures to explain your model\n", " - your model draft is a great starting point!\n", - " - make figures that provide intuition about model behavior (just like you would create figures to provide intuition about expeimental data)\n", + " - make figures that provide intuition about model behavior (just like you would create figures to provide intuition about experimental data)\n", "\n", "4. My code is too mesy to be published\n", " - not an option (many journal now rightfully require it)\n", diff --git a/projects/modelingsteps/TrainIllusionModel.ipynb b/projects/modelingsteps/TrainIllusionModel.ipynb index 194e5f7e4a..8ddbbaa8fe 100644 --- a/projects/modelingsteps/TrainIllusionModel.ipynb +++ b/projects/modelingsteps/TrainIllusionModel.ipynb @@ -99,7 +99,7 @@ "Our main hypothesis is that the strength of the illusion has a linear relationship to the amplitude of vestibular noise.\n", "\n", "Mathematically, this would write as $S = k \\cdot N$, where $S$ is the illusion strength and $N$ is the noise level, and $k$ is a free parameter.\n", - ">we could simply use the frequency of occurance across repetitions as the \"strength of the illusion\"\n", + ">we could simply use the frequency of occurrence across repetitions as the \"strength of the illusion\"\n", "\n", "We would get the noise as the standard deviation of $v(t)$, i.e. $N=\\mathbf{E}[v(t)^2]$, where $\\mathbf{E}$ stands for the expected value.\n", "\n", @@ -496,7 +496,7 @@ "\n", "*Part of step 9*\n", "\n", - "Ok, so we still need to actually evaluate and test our model performance. 
Since this is a conceptual model and we don't have actual data (yet), we will evaluate how our model behaves as a function of the 3 parameters. If we had data with different conditions, we could try to fit the model to the data and evaluate the goodness of fit, etc... If other alterative models existed, we could evaluate our model against those alternatives too.\n", + "Ok, so we still need to actually evaluate and test our model performance. Since this is a conceptual model and we don't have actual data (yet), we will evaluate how our model behaves as a function of the 3 parameters. If we had data with different conditions, we could try to fit the model to the data and evaluate the goodness of fit, etc... If other alternative models existed, we could evaluate our model against those alternatives too.\n", "\n", "So let's run out model in different parameter regimes and analyze the result to get some insight into the model performance" ] diff --git a/projects/neurons/load_Allen_Visual_Behavior_from_SDK.ipynb b/projects/neurons/load_Allen_Visual_Behavior_from_SDK.ipynb index 16e46ece0f..6d269efc8d 100644 --- a/projects/neurons/load_Allen_Visual_Behavior_from_SDK.ipynb +++ b/projects/neurons/load_Allen_Visual_Behavior_from_SDK.ipynb @@ -42,7 +42,7 @@ "execution": {} }, "source": [ - "We have built a package called `brain_observatory_utilities` which contains some useful convenience functions. The `allenSDK` is a dependency of this package and will be automatically installed when you install `brain_observatory_utilities` per the instrutions below.\n", + "We have built a package called `brain_observatory_utilities` which contains some useful convenience functions. The `allenSDK` is a dependency of this package and will be automatically installed when you install `brain_observatory_utilities` per the instructions below.\n", "\n", "We will first install `brain_observatory_utilities` into our colab environment by running the commands below. 
When this cell is complete, click on the `RESTART RUNTIME` button that appears at the end of the output. Note that running this cell will produce a long list of outputs and some error messages. Clicking `RESTART RUNTIME` at the end will resolve these issues.\n", "\n", @@ -1670,7 +1670,7 @@ "\n", "It will also include a subset of metadata from `ophys_experiment_table` to facilitate splitting by depth, structure (aka cortical area), cre line (aka cell class), etc.\n", "\n", - "Note that 'tidy' data means that each row represents only one observation. Observations are stacked vertically. Thus, the `timestamps` colums will repeat for every cell in the dataset." + "Note that 'tidy' data means that each row represents only one observation. Observations are stacked vertically. Thus, the `timestamps` columns will repeat for every cell in the dataset." ] }, { @@ -3970,7 +3970,7 @@ "execution": {} }, "source": [ - "We can see that the output has colums for\n", + "We can see that the output has columns for\n", "* `time` - this is our new timebase relative to the events. In this case, it ranges from -3 to 3\n", "* `dff` - this is the deltaF/F value surrounding each event, interpolated onto the new timebase. If, when calling the `event_triggered_response` function we had passed `y = 'events'`, this column would be events instead of dff.\n", "* `event_number` - this is an integer representing the count of each event. In this example, there were 185 omissions, so they are numbered from 0 to 184\n", @@ -4185,7 +4185,7 @@ "execution": {} }, "source": [ - "Note that the regular, image-driven responses with a 750 ms inter-stimulus interval are visible everywhere except at t=0, which is when the unexpectedly omitted stimulus occured." + "Note that the regular, image-driven responses with a 750 ms inter-stimulus interval are visible everywhere except at t=0, which is when the unexpectedly omitted stimulus occurred." 
] }, { diff --git a/projects/neurons/load_Allen_Visual_Behavior_from_pre_processed_file.ipynb b/projects/neurons/load_Allen_Visual_Behavior_from_pre_processed_file.ipynb index c268f06a07..02f1d0498d 100644 --- a/projects/neurons/load_Allen_Visual_Behavior_from_pre_processed_file.ipynb +++ b/projects/neurons/load_Allen_Visual_Behavior_from_pre_processed_file.ipynb @@ -57,7 +57,7 @@ "execution": {} }, "source": [ - "##### Multiple cortical areas and depths were measured concurently in each session, at a sample rate of 11Hz.\n", + "##### Multiple cortical areas and depths were measured concurrently in each session, at a sample rate of 11Hz.\n", "##### Data was collected from excitatory and inhibitory neural populations.\n", " " ] diff --git a/projects/neurons/load_steinmetz_extra.ipynb b/projects/neurons/load_steinmetz_extra.ipynb index 8e0645bdc0..44f1643cd5 100644 --- a/projects/neurons/load_steinmetz_extra.ipynb +++ b/projects/neurons/load_steinmetz_extra.ipynb @@ -127,7 +127,7 @@ "execution": {} }, "source": [ - "`dat_LFP`, `dat_WAV`, `dat_ST` contain 39 sessions from 10 mice, data from Steinmetz et al, 2019, supplemental to the main data provided for NMA. Time bins for all measurements are 10ms, starting 500ms before stimulus onset (same as the main data). The followin fields are available across the three supplemental files. \n", + "`dat_LFP`, `dat_WAV`, `dat_ST` contain 39 sessions from 10 mice, data from Steinmetz et al, 2019, supplemental to the main data provided for NMA. Time bins for all measurements are 10ms, starting 500ms before stimulus onset (same as the main data). The following fields are available across the three supplemental files. \n", "\n", "* `dat['lfp']`: recording of the local field potential in each brain area from this experiment, binned at `10ms`.\n", "* `dat['brain_area_lfp']`: brain area names for the LFP channels. 
\n", diff --git a/projects/neurons/load_stringer_orientations.ipynb b/projects/neurons/load_stringer_orientations.ipynb index 6e385b967d..a645535529 100644 --- a/projects/neurons/load_stringer_orientations.ipynb +++ b/projects/neurons/load_stringer_orientations.ipynb @@ -42,7 +42,7 @@ } ], "source": [ - "# @title Install depedencies\n", + "# @title Install dependencies\n", "!pip install umap-learn --quiet" ] }, diff --git a/projects/theory/motor_RNNs.ipynb b/projects/theory/motor_RNNs.ipynb index cbc0d67c7d..f6c7d1cf07 100644 --- a/projects/theory/motor_RNNs.ipynb +++ b/projects/theory/motor_RNNs.ipynb @@ -567,7 +567,7 @@ "def plot_reaching_task_stimuli(stimulus, n_targets:int, tsteps:int, T:int):\n", "\n", " # plot target cue with \"pulse_steps\" duration\n", - " # at the beginnning of each trial\n", + " # at the beginning of each trial\n", " stimulus_set = np.arange(0, n_targets,1)\n", "\n", " fig, axes = plt.subplots(n_targets, 1, figsize=(30,9))\n", @@ -645,7 +645,7 @@ "def plot_force_stimuli(stimulus, n_targets:int, tsteps:int, T:int):\n", "\n", " # plot target cue with \"pulse_steps\" duration\n", - " # at the beginnning of each trial\n", + " # at the beginning of each trial\n", " stimulus_set = np.arange(0, n_targets, 1)\n", " fig, axes = plt.subplots(n_targets, 1, figsize=(30,9))\n", " for target in stimulus_set:\n", diff --git a/tutorials/Bonus_Autoencoders/Bonus_Tutorial1.ipynb b/tutorials/Bonus_Autoencoders/Bonus_Tutorial1.ipynb index b68daf4dd7..7be32b6d8b 100644 --- a/tutorials/Bonus_Autoencoders/Bonus_Tutorial1.ipynb +++ b/tutorials/Bonus_Autoencoders/Bonus_Tutorial1.ipynb @@ -1955,7 +1955,7 @@ "```python\n", "model.apply(init_weights_kaiming_uniform)\n", "```\n", - "An alternative is to sample from a gaussian distribution $\\mathcal{N}(\\mu, \\sigma^2)$ with $\\mu=0$ and $\\sigma=1/\\sqrt{fan\\_in}$. 
Example for reseting all but the two last autoencoder layers to Kaiming normal:\n", + "An alternative is to sample from a gaussian distribution $\\mathcal{N}(\\mu, \\sigma^2)$ with $\\mu=0$ and $\\sigma=1/\\sqrt{fan\\_in}$. Example for resetting all but the two last autoencoder layers to Kaiming normal:\n", "\n", "```python\n", "model[:-2].apply(init_weights_kaiming_normal)\n", diff --git a/tutorials/Bonus_Autoencoders/Bonus_Tutorial2.ipynb b/tutorials/Bonus_Autoencoders/Bonus_Tutorial2.ipynb index 8d85902da7..bb792ef60a 100644 --- a/tutorials/Bonus_Autoencoders/Bonus_Tutorial2.ipynb +++ b/tutorials/Bonus_Autoencoders/Bonus_Tutorial2.ipynb @@ -78,7 +78,7 @@ }, "outputs": [], "source": [ - "# @title Install dependecies\n", + "# @title Install dependencies\n", "!pip install plotly --quiet" ] }, diff --git a/tutorials/Bonus_Autoencoders/Bonus_Tutorial3.ipynb b/tutorials/Bonus_Autoencoders/Bonus_Tutorial3.ipynb index 2e8d7ce7ca..18755a89fc 100644 --- a/tutorials/Bonus_Autoencoders/Bonus_Tutorial3.ipynb +++ b/tutorials/Bonus_Autoencoders/Bonus_Tutorial3.ipynb @@ -1154,7 +1154,7 @@ "\n", "We provide the functions `save_checkpoint`, `load_checkpoint`, and `reset_checkpoint` to implement the steps above and download pre-trained weights from the GitHub repo.\n", "\n", - "If downloading from GitHub fails, please uncomment the 3rd cell bellow to train the model for `n_epochs=10` and save it locally.\n", + "If downloading from GitHub fails, please uncomment the 3rd cell below to train the model for `n_epochs=10` and save it locally.\n", "\n", "**Instructions:**\n", "* Please execute the cell(s) below" diff --git a/tutorials/Bonus_Autoencoders/instructor/Bonus_Tutorial1.ipynb b/tutorials/Bonus_Autoencoders/instructor/Bonus_Tutorial1.ipynb index 726e91811f..f6deee7c57 100644 --- a/tutorials/Bonus_Autoencoders/instructor/Bonus_Tutorial1.ipynb +++ b/tutorials/Bonus_Autoencoders/instructor/Bonus_Tutorial1.ipynb @@ -1959,7 +1959,7 @@ "```python\n", 
"model.apply(init_weights_kaiming_uniform)\n", "```\n", - "An alternative is to sample from a gaussian distribution $\\mathcal{N}(\\mu, \\sigma^2)$ with $\\mu=0$ and $\\sigma=1/\\sqrt{fan\\_in}$. Example for reseting all but the two last autoencoder layers to Kaiming normal:\n", + "An alternative is to sample from a gaussian distribution $\\mathcal{N}(\\mu, \\sigma^2)$ with $\\mu=0$ and $\\sigma=1/\\sqrt{fan\\_in}$. Example for resetting all but the two last autoencoder layers to Kaiming normal:\n", "\n", "```python\n", "model[:-2].apply(init_weights_kaiming_normal)\n", diff --git a/tutorials/Bonus_Autoencoders/instructor/Bonus_Tutorial2.ipynb b/tutorials/Bonus_Autoencoders/instructor/Bonus_Tutorial2.ipynb index b572d0b615..ff12414745 100644 --- a/tutorials/Bonus_Autoencoders/instructor/Bonus_Tutorial2.ipynb +++ b/tutorials/Bonus_Autoencoders/instructor/Bonus_Tutorial2.ipynb @@ -78,7 +78,7 @@ }, "outputs": [], "source": [ - "# @title Install dependecies\n", + "# @title Install dependencies\n", "!pip install plotly --quiet" ] }, diff --git a/tutorials/Bonus_Autoencoders/instructor/Bonus_Tutorial3.ipynb b/tutorials/Bonus_Autoencoders/instructor/Bonus_Tutorial3.ipynb index 6ea2287dae..a20c26746c 100644 --- a/tutorials/Bonus_Autoencoders/instructor/Bonus_Tutorial3.ipynb +++ b/tutorials/Bonus_Autoencoders/instructor/Bonus_Tutorial3.ipynb @@ -1154,7 +1154,7 @@ "\n", "We provide the functions `save_checkpoint`, `load_checkpoint`, and `reset_checkpoint` to implement the steps above and download pre-trained weights from the GitHub repo.\n", "\n", - "If downloading from GitHub fails, please uncomment the 3rd cell bellow to train the model for `n_epochs=10` and save it locally.\n", + "If downloading from GitHub fails, please uncomment the 3rd cell below to train the model for `n_epochs=10` and save it locally.\n", "\n", "**Instructions:**\n", "* Please execute the cell(s) below" diff --git a/tutorials/Bonus_Autoencoders/student/Bonus_Tutorial1.ipynb 
b/tutorials/Bonus_Autoencoders/student/Bonus_Tutorial1.ipynb index 8aecc46ca0..e1a8f885e1 100644 --- a/tutorials/Bonus_Autoencoders/student/Bonus_Tutorial1.ipynb +++ b/tutorials/Bonus_Autoencoders/student/Bonus_Tutorial1.ipynb @@ -1937,7 +1937,7 @@ "```python\n", "model.apply(init_weights_kaiming_uniform)\n", "```\n", - "An alternative is to sample from a gaussian distribution $\\mathcal{N}(\\mu, \\sigma^2)$ with $\\mu=0$ and $\\sigma=1/\\sqrt{fan\\_in}$. Example for reseting all but the two last autoencoder layers to Kaiming normal:\n", + "An alternative is to sample from a gaussian distribution $\\mathcal{N}(\\mu, \\sigma^2)$ with $\\mu=0$ and $\\sigma=1/\\sqrt{fan\\_in}$. Example for resetting all but the two last autoencoder layers to Kaiming normal:\n", "\n", "```python\n", "model[:-2].apply(init_weights_kaiming_normal)\n", diff --git a/tutorials/Bonus_Autoencoders/student/Bonus_Tutorial2.ipynb b/tutorials/Bonus_Autoencoders/student/Bonus_Tutorial2.ipynb index ee99b81f04..7f97fcf433 100644 --- a/tutorials/Bonus_Autoencoders/student/Bonus_Tutorial2.ipynb +++ b/tutorials/Bonus_Autoencoders/student/Bonus_Tutorial2.ipynb @@ -78,7 +78,7 @@ }, "outputs": [], "source": [ - "# @title Install dependecies\n", + "# @title Install dependencies\n", "!pip install plotly --quiet" ] }, diff --git a/tutorials/Bonus_Autoencoders/student/Bonus_Tutorial3.ipynb b/tutorials/Bonus_Autoencoders/student/Bonus_Tutorial3.ipynb index 24cf175af6..f486f06356 100644 --- a/tutorials/Bonus_Autoencoders/student/Bonus_Tutorial3.ipynb +++ b/tutorials/Bonus_Autoencoders/student/Bonus_Tutorial3.ipynb @@ -1154,7 +1154,7 @@ "\n", "We provide the functions `save_checkpoint`, `load_checkpoint`, and `reset_checkpoint` to implement the steps above and download pre-trained weights from the GitHub repo.\n", "\n", - "If downloading from GitHub fails, please uncomment the 3rd cell bellow to train the model for `n_epochs=10` and save it locally.\n", + "If downloading from GitHub fails, please uncomment the 
3rd cell below to train the model for `n_epochs=10` and save it locally.\n", "\n", "**Instructions:**\n", "* Please execute the cell(s) below" diff --git a/tutorials/W1D2_ModelFitting/W1D2_Tutorial4.ipynb b/tutorials/W1D2_ModelFitting/W1D2_Tutorial4.ipynb index a2f689a9c3..2386661902 100644 --- a/tutorials/W1D2_ModelFitting/W1D2_Tutorial4.ipynb +++ b/tutorials/W1D2_ModelFitting/W1D2_Tutorial4.ipynb @@ -236,7 +236,7 @@ "\n", "This matrix $\\mathbf{X}$ is often referred to as the \"[design matrix](https://en.wikipedia.org/wiki/Design_matrix)\".\n", "\n", - "We want to find an optimal vector of paramters $\\boldsymbol{\\hat\\theta}$. Recall our analytic solution to minimizing MSE for a single regressor:\n", + "We want to find an optimal vector of parameters $\\boldsymbol{\\hat\\theta}$. Recall our analytic solution to minimizing MSE for a single regressor:\n", "\n", "\\begin{equation}\n", "\\hat\\theta = \\frac{\\sum_{i=1}^N x_i y_i}{\\sum_{i=1}^N x_i^2}.\n", diff --git a/tutorials/W1D2_ModelFitting/instructor/W1D2_Tutorial4.ipynb b/tutorials/W1D2_ModelFitting/instructor/W1D2_Tutorial4.ipynb index 6ce9139552..835c239ad7 100644 --- a/tutorials/W1D2_ModelFitting/instructor/W1D2_Tutorial4.ipynb +++ b/tutorials/W1D2_ModelFitting/instructor/W1D2_Tutorial4.ipynb @@ -236,7 +236,7 @@ "\n", "This matrix $\\mathbf{X}$ is often referred to as the \"[design matrix](https://en.wikipedia.org/wiki/Design_matrix)\".\n", "\n", - "We want to find an optimal vector of paramters $\\boldsymbol{\\hat\\theta}$. Recall our analytic solution to minimizing MSE for a single regressor:\n", + "We want to find an optimal vector of parameters $\\boldsymbol{\\hat\\theta}$. 
Recall our analytic solution to minimizing MSE for a single regressor:\n", "\n", "\\begin{equation}\n", "\\hat\\theta = \\frac{\\sum_{i=1}^N x_i y_i}{\\sum_{i=1}^N x_i^2}.\n", diff --git a/tutorials/W1D2_ModelFitting/student/W1D2_Tutorial4.ipynb b/tutorials/W1D2_ModelFitting/student/W1D2_Tutorial4.ipynb index 8a6ad1e4f5..de0a393f24 100644 --- a/tutorials/W1D2_ModelFitting/student/W1D2_Tutorial4.ipynb +++ b/tutorials/W1D2_ModelFitting/student/W1D2_Tutorial4.ipynb @@ -236,7 +236,7 @@ "\n", "This matrix $\\mathbf{X}$ is often referred to as the \"[design matrix](https://en.wikipedia.org/wiki/Design_matrix)\".\n", "\n", - "We want to find an optimal vector of paramters $\\boldsymbol{\\hat\\theta}$. Recall our analytic solution to minimizing MSE for a single regressor:\n", + "We want to find an optimal vector of parameters $\\boldsymbol{\\hat\\theta}$. Recall our analytic solution to minimizing MSE for a single regressor:\n", "\n", "\\begin{equation}\n", "\\hat\\theta = \\frac{\\sum_{i=1}^N x_i y_i}{\\sum_{i=1}^N x_i^2}.\n", diff --git a/tutorials/W1D3_GeneralizedLinearModels/W1D3_Tutorial1.ipynb b/tutorials/W1D3_GeneralizedLinearModels/W1D3_Tutorial1.ipynb index 63aa8ce524..c9983ea15b 100644 --- a/tutorials/W1D3_GeneralizedLinearModels/W1D3_Tutorial1.ipynb +++ b/tutorials/W1D3_GeneralizedLinearModels/W1D3_Tutorial1.ipynb @@ -1107,7 +1107,7 @@ " # Use a random vector of weights to start (mean 0, sd .2)\n", " x0 = np.random.normal(0, .2, d + 1)\n", "\n", - " # Find parameters that minmize the negative log likelihood function\n", + " # Find parameters that minimize the negative log likelihood function\n", " res = minimize(..., args=(X, y))\n", "\n", " return ...\n", @@ -1172,7 +1172,7 @@ " # Use a random vector of weights to start (mean 0, sd .2)\n", " x0 = np.random.normal(0, .2, d + 1)\n", "\n", - " # Find parameters that minmize the negative log likelihood function\n", + " # Find parameters that minimize the negative log likelihood function\n", " res = 
minimize(neg_log_lik_lnp, x0, args=(X, y))\n", "\n", " return res[\"x\"]\n", diff --git a/tutorials/W1D3_GeneralizedLinearModels/W1D3_Tutorial2.ipynb b/tutorials/W1D3_GeneralizedLinearModels/W1D3_Tutorial2.ipynb index 6d9403d109..5275c968ff 100644 --- a/tutorials/W1D3_GeneralizedLinearModels/W1D3_Tutorial2.ipynb +++ b/tutorials/W1D3_GeneralizedLinearModels/W1D3_Tutorial2.ipynb @@ -963,7 +963,7 @@ "
\n", " Click here for text recap of video \n", "\n", - "Regularization forces a model to learn a set solutions you *a priori* believe to be more correct, which reduces overfitting because it doesn't have as much flexibility to fit idiosyncracies in the training data. This adds model bias, but it's a good bias because you know (maybe) that parameters should be small or mostly 0.\n", + "Regularization forces a model to learn a set solutions you *a priori* believe to be more correct, which reduces overfitting because it doesn't have as much flexibility to fit idiosyncrasies in the training data. This adds model bias, but it's a good bias because you know (maybe) that parameters should be small or mostly 0.\n", "\n", "In a GLM, a common form of regularization is to *shrink* the classifier weights. In a linear model, you can see its effect by plotting the weights. We've defined a helper function, `plot_weights`, that we'll use extensively in this section.\n", "\n", diff --git a/tutorials/W1D3_GeneralizedLinearModels/instructor/W1D3_Tutorial1.ipynb b/tutorials/W1D3_GeneralizedLinearModels/instructor/W1D3_Tutorial1.ipynb index b407008f0e..c94b39c9eb 100644 --- a/tutorials/W1D3_GeneralizedLinearModels/instructor/W1D3_Tutorial1.ipynb +++ b/tutorials/W1D3_GeneralizedLinearModels/instructor/W1D3_Tutorial1.ipynb @@ -1111,7 +1111,7 @@ " # Use a random vector of weights to start (mean 0, sd .2)\n", " x0 = np.random.normal(0, .2, d + 1)\n", "\n", - " # Find parameters that minmize the negative log likelihood function\n", + " # Find parameters that minimize the negative log likelihood function\n", " res = minimize(..., args=(X, y))\n", "\n", " return ...\n", @@ -1178,7 +1178,7 @@ " # Use a random vector of weights to start (mean 0, sd .2)\n", " x0 = np.random.normal(0, .2, d + 1)\n", "\n", - " # Find parameters that minmize the negative log likelihood function\n", + " # Find parameters that minimize the negative log likelihood function\n", " res = minimize(neg_log_lik_lnp, x0, 
args=(X, y))\n", "\n", " return res[\"x\"]\n", diff --git a/tutorials/W1D3_GeneralizedLinearModels/instructor/W1D3_Tutorial2.ipynb b/tutorials/W1D3_GeneralizedLinearModels/instructor/W1D3_Tutorial2.ipynb index fd75d9c770..d2d417ca31 100644 --- a/tutorials/W1D3_GeneralizedLinearModels/instructor/W1D3_Tutorial2.ipynb +++ b/tutorials/W1D3_GeneralizedLinearModels/instructor/W1D3_Tutorial2.ipynb @@ -967,7 +967,7 @@ "
\n", " Click here for text recap of video \n", "\n", - "Regularization forces a model to learn a set solutions you *a priori* believe to be more correct, which reduces overfitting because it doesn't have as much flexibility to fit idiosyncracies in the training data. This adds model bias, but it's a good bias because you know (maybe) that parameters should be small or mostly 0.\n", + "Regularization forces a model to learn a set solutions you *a priori* believe to be more correct, which reduces overfitting because it doesn't have as much flexibility to fit idiosyncrasies in the training data. This adds model bias, but it's a good bias because you know (maybe) that parameters should be small or mostly 0.\n", "\n", "In a GLM, a common form of regularization is to *shrink* the classifier weights. In a linear model, you can see its effect by plotting the weights. We've defined a helper function, `plot_weights`, that we'll use extensively in this section.\n", "\n", diff --git a/tutorials/W1D3_GeneralizedLinearModels/solutions/W1D3_Tutorial1_Solution_0d56b394.py b/tutorials/W1D3_GeneralizedLinearModels/solutions/W1D3_Tutorial1_Solution_0d56b394.py index 29fa7d7586..82353d108d 100644 --- a/tutorials/W1D3_GeneralizedLinearModels/solutions/W1D3_Tutorial1_Solution_0d56b394.py +++ b/tutorials/W1D3_GeneralizedLinearModels/solutions/W1D3_Tutorial1_Solution_0d56b394.py @@ -38,7 +38,7 @@ def fit_lnp(stim, spikes, d=25): # Use a random vector of weights to start (mean 0, sd .2) x0 = np.random.normal(0, .2, d + 1) - # Find parameters that minmize the negative log likelihood function + # Find parameters that minimize the negative log likelihood function res = minimize(neg_log_lik_lnp, x0, args=(X, y)) return res["x"] diff --git a/tutorials/W1D3_GeneralizedLinearModels/student/W1D3_Tutorial1.ipynb b/tutorials/W1D3_GeneralizedLinearModels/student/W1D3_Tutorial1.ipynb index 6390e06943..c68ce026ab 100644 --- a/tutorials/W1D3_GeneralizedLinearModels/student/W1D3_Tutorial1.ipynb +++ 
b/tutorials/W1D3_GeneralizedLinearModels/student/W1D3_Tutorial1.ipynb @@ -1051,7 +1051,7 @@ " # Use a random vector of weights to start (mean 0, sd .2)\n", " x0 = np.random.normal(0, .2, d + 1)\n", "\n", - " # Find parameters that minmize the negative log likelihood function\n", + " # Find parameters that minimize the negative log likelihood function\n", " res = minimize(..., args=(X, y))\n", "\n", " return ...\n", diff --git a/tutorials/W1D3_GeneralizedLinearModels/student/W1D3_Tutorial2.ipynb b/tutorials/W1D3_GeneralizedLinearModels/student/W1D3_Tutorial2.ipynb index 730cda2167..16fe216097 100644 --- a/tutorials/W1D3_GeneralizedLinearModels/student/W1D3_Tutorial2.ipynb +++ b/tutorials/W1D3_GeneralizedLinearModels/student/W1D3_Tutorial2.ipynb @@ -933,7 +933,7 @@ "
\n", " Click here for text recap of video \n", "\n", - "Regularization forces a model to learn a set solutions you *a priori* believe to be more correct, which reduces overfitting because it doesn't have as much flexibility to fit idiosyncracies in the training data. This adds model bias, but it's a good bias because you know (maybe) that parameters should be small or mostly 0.\n", + "Regularization forces a model to learn a set solutions you *a priori* believe to be more correct, which reduces overfitting because it doesn't have as much flexibility to fit idiosyncrasies in the training data. This adds model bias, but it's a good bias because you know (maybe) that parameters should be small or mostly 0.\n", "\n", "In a GLM, a common form of regularization is to *shrink* the classifier weights. In a linear model, you can see its effect by plotting the weights. We've defined a helper function, `plot_weights`, that we'll use extensively in this section.\n", "\n", diff --git a/tutorials/W1D5_DeepLearning/W1D5_Tutorial1.ipynb b/tutorials/W1D5_DeepLearning/W1D5_Tutorial1.ipynb index 4b12cecea6..1a3bb89c5f 100644 --- a/tutorials/W1D5_DeepLearning/W1D5_Tutorial1.ipynb +++ b/tutorials/W1D5_DeepLearning/W1D5_Tutorial1.ipynb @@ -414,7 +414,7 @@ "
\n", " Click here for text recap of relevant part of video \n", "\n", - "We will be exploring neural activity in mice while the mice is viewing oriented grating stimuli on a screen in front of it. We record neural activity using a technique called two-photon calcium imaging, which allows us to record many thousands of neurons simultanously. The neurons light up when they fire. We then convert this imaging data to a matrix of neural responses by stimuli presented. For the purposes of this tutorial we are going to bin the neural responses and compute each neuron’s tuning curve. We used bins of 1 degree. We will use the response of all neurons in a single bin to try to predict which stimulus was shown. So we are going to be using the responses of 24000 neurons to try to predict 360 different possible stimulus conditions corresponding to each degree of orientation - which means we're in the regime of big data!\n", + "We will be exploring neural activity in mice while the mice is viewing oriented grating stimuli on a screen in front of it. We record neural activity using a technique called two-photon calcium imaging, which allows us to record many thousands of neurons simultaneously. The neurons light up when they fire. We then convert this imaging data to a matrix of neural responses by stimuli presented. For the purposes of this tutorial we are going to bin the neural responses and compute each neuron’s tuning curve. We used bins of 1 degree. We will use the response of all neurons in a single bin to try to predict which stimulus was shown. So we are going to be using the responses of 24000 neurons to try to predict 360 different possible stimulus conditions corresponding to each degree of orientation - which means we're in the regime of big data!\n", "\n", "
\n", "\n", diff --git a/tutorials/W1D5_DeepLearning/W1D5_Tutorial2.ipynb b/tutorials/W1D5_DeepLearning/W1D5_Tutorial2.ipynb index 9ea16fe6e1..b039b0db1f 100644 --- a/tutorials/W1D5_DeepLearning/W1D5_Tutorial2.ipynb +++ b/tutorials/W1D5_DeepLearning/W1D5_Tutorial2.ipynb @@ -238,7 +238,7 @@ " to ~4,000 stimulus gratings of different orientations, recorded\n", " through Calcium imaginge. The responses have been normalized by\n", " spontaneous levels of activity and then z-scored over stimuli, so\n", - " expect negative numbers. The repsonses were split into train and\n", + " expect negative numbers. The responses were split into train and\n", " test and then each set were averaged in bins of 6 degrees.\n", "\n", " This function returns the relevant data (neural responses and\n", diff --git a/tutorials/W1D5_DeepLearning/W1D5_Tutorial3.ipynb b/tutorials/W1D5_DeepLearning/W1D5_Tutorial3.ipynb index 6f92431110..a9e1dae61f 100644 --- a/tutorials/W1D5_DeepLearning/W1D5_Tutorial3.ipynb +++ b/tutorials/W1D5_DeepLearning/W1D5_Tutorial3.ipynb @@ -272,7 +272,7 @@ " These data comprise time-averaged responses of ~20,000 neurons\n", " to ~4,000 stimulus gratings of different orientations, recorded\n", " through Calcium imaginge. The responses have been normalized by\n", - " spontanous levels of activity and then z-scored over stimuli, so\n", + " spontaneous levels of activity and then z-scored over stimuli, so\n", " expect negative numbers. They have also been binned and averaged\n", " to each degree of orientation.\n", "\n", diff --git a/tutorials/W1D5_DeepLearning/W1D5_Tutorial4.ipynb b/tutorials/W1D5_DeepLearning/W1D5_Tutorial4.ipynb index 7160a0099f..8cd8199dd7 100644 --- a/tutorials/W1D5_DeepLearning/W1D5_Tutorial4.ipynb +++ b/tutorials/W1D5_DeepLearning/W1D5_Tutorial4.ipynb @@ -395,7 +395,7 @@ " to ~4,000 stimulus gratings of different orientations, recorded\n", " through Calcium imaginge. 
The responses have been normalized by\n", " spontaneous levels of activity and then z-scored over stimuli, so\n", - " expect negative numbers. The repsonses were split into train and\n", + " expect negative numbers. The responses were split into train and\n", " test and then each set were averaged in bins of 6 degrees.\n", "\n", " This function returns the relevant data (neural responses and\n", @@ -471,7 +471,7 @@ "\n", " \"\"\"\n", " bins = np.linspace(0, 360, n_classes + 1)\n", - " return torch.tensor(np.digitize(ori.squeeze(), bins)) - 1 # minus 1 to accomodate Python indexing\n", + " return torch.tensor(np.digitize(ori.squeeze(), bins)) - 1 # minus 1 to accommodate Python indexing\n", "\n", "def grating(angle, sf=1 / 28, res=0.1, patch=False):\n", " \"\"\"Generate oriented grating stimulus\n", diff --git a/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial1.ipynb b/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial1.ipynb index 5828da35b5..59541d175a 100644 --- a/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial1.ipynb +++ b/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial1.ipynb @@ -414,7 +414,7 @@ "
\n", " Click here for text recap of relevant part of video \n", "\n", - "We will be exploring neural activity in mice while the mice is viewing oriented grating stimuli on a screen in front of it. We record neural activity using a technique called two-photon calcium imaging, which allows us to record many thousands of neurons simultanously. The neurons light up when they fire. We then convert this imaging data to a matrix of neural responses by stimuli presented. For the purposes of this tutorial we are going to bin the neural responses and compute each neuron’s tuning curve. We used bins of 1 degree. We will use the response of all neurons in a single bin to try to predict which stimulus was shown. So we are going to be using the responses of 24000 neurons to try to predict 360 different possible stimulus conditions corresponding to each degree of orientation - which means we're in the regime of big data!\n", + "We will be exploring neural activity in mice while the mice is viewing oriented grating stimuli on a screen in front of it. We record neural activity using a technique called two-photon calcium imaging, which allows us to record many thousands of neurons simultaneously. The neurons light up when they fire. We then convert this imaging data to a matrix of neural responses by stimuli presented. For the purposes of this tutorial we are going to bin the neural responses and compute each neuron’s tuning curve. We used bins of 1 degree. We will use the response of all neurons in a single bin to try to predict which stimulus was shown. So we are going to be using the responses of 24000 neurons to try to predict 360 different possible stimulus conditions corresponding to each degree of orientation - which means we're in the regime of big data!\n", "\n", "
\n", "\n", diff --git a/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial2.ipynb b/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial2.ipynb index 81ff22ad6e..1f1e84727f 100644 --- a/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial2.ipynb +++ b/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial2.ipynb @@ -238,7 +238,7 @@ " to ~4,000 stimulus gratings of different orientations, recorded\n", " through Calcium imaginge. The responses have been normalized by\n", " spontaneous levels of activity and then z-scored over stimuli, so\n", - " expect negative numbers. The repsonses were split into train and\n", + " expect negative numbers. The responses were split into train and\n", " test and then each set were averaged in bins of 6 degrees.\n", "\n", " This function returns the relevant data (neural responses and\n", diff --git a/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial3.ipynb b/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial3.ipynb index 3301251948..60fd1c887a 100644 --- a/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial3.ipynb +++ b/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial3.ipynb @@ -272,7 +272,7 @@ " These data comprise time-averaged responses of ~20,000 neurons\n", " to ~4,000 stimulus gratings of different orientations, recorded\n", " through Calcium imaginge. The responses have been normalized by\n", - " spontanous levels of activity and then z-scored over stimuli, so\n", + " spontaneous levels of activity and then z-scored over stimuli, so\n", " expect negative numbers. 
They have also been binned and averaged\n", " to each degree of orientation.\n", "\n", diff --git a/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial4.ipynb b/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial4.ipynb index 479750c94c..b9631fe753 100644 --- a/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial4.ipynb +++ b/tutorials/W1D5_DeepLearning/instructor/W1D5_Tutorial4.ipynb @@ -395,7 +395,7 @@ " to ~4,000 stimulus gratings of different orientations, recorded\n", " through Calcium imaginge. The responses have been normalized by\n", " spontaneous levels of activity and then z-scored over stimuli, so\n", - " expect negative numbers. The repsonses were split into train and\n", + " expect negative numbers. The responses were split into train and\n", " test and then each set were averaged in bins of 6 degrees.\n", "\n", " This function returns the relevant data (neural responses and\n", @@ -471,7 +471,7 @@ "\n", " \"\"\"\n", " bins = np.linspace(0, 360, n_classes + 1)\n", - " return torch.tensor(np.digitize(ori.squeeze(), bins)) - 1 # minus 1 to accomodate Python indexing\n", + " return torch.tensor(np.digitize(ori.squeeze(), bins)) - 1 # minus 1 to accommodate Python indexing\n", "\n", "def grating(angle, sf=1 / 28, res=0.1, patch=False):\n", " \"\"\"Generate oriented grating stimulus\n", diff --git a/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial1.ipynb b/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial1.ipynb index fe4d0a634e..6113076864 100644 --- a/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial1.ipynb +++ b/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial1.ipynb @@ -414,7 +414,7 @@ "
\n", " Click here for text recap of relevant part of video \n", "\n", - "We will be exploring neural activity in mice while the mice is viewing oriented grating stimuli on a screen in front of it. We record neural activity using a technique called two-photon calcium imaging, which allows us to record many thousands of neurons simultanously. The neurons light up when they fire. We then convert this imaging data to a matrix of neural responses by stimuli presented. For the purposes of this tutorial we are going to bin the neural responses and compute each neuron’s tuning curve. We used bins of 1 degree. We will use the response of all neurons in a single bin to try to predict which stimulus was shown. So we are going to be using the responses of 24000 neurons to try to predict 360 different possible stimulus conditions corresponding to each degree of orientation - which means we're in the regime of big data!\n", + "We will be exploring neural activity in mice while the mice is viewing oriented grating stimuli on a screen in front of it. We record neural activity using a technique called two-photon calcium imaging, which allows us to record many thousands of neurons simultaneously. The neurons light up when they fire. We then convert this imaging data to a matrix of neural responses by stimuli presented. For the purposes of this tutorial we are going to bin the neural responses and compute each neuron’s tuning curve. We used bins of 1 degree. We will use the response of all neurons in a single bin to try to predict which stimulus was shown. So we are going to be using the responses of 24000 neurons to try to predict 360 different possible stimulus conditions corresponding to each degree of orientation - which means we're in the regime of big data!\n", "\n", "
\n", "\n", diff --git a/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial2.ipynb b/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial2.ipynb index f6fc09a512..f3363ff051 100644 --- a/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial2.ipynb +++ b/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial2.ipynb @@ -238,7 +238,7 @@ " to ~4,000 stimulus gratings of different orientations, recorded\n", " through Calcium imaginge. The responses have been normalized by\n", " spontaneous levels of activity and then z-scored over stimuli, so\n", - " expect negative numbers. The repsonses were split into train and\n", + " expect negative numbers. The responses were split into train and\n", " test and then each set were averaged in bins of 6 degrees.\n", "\n", " This function returns the relevant data (neural responses and\n", diff --git a/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial3.ipynb b/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial3.ipynb index 9ce483e260..d1aa370acb 100644 --- a/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial3.ipynb +++ b/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial3.ipynb @@ -272,7 +272,7 @@ " These data comprise time-averaged responses of ~20,000 neurons\n", " to ~4,000 stimulus gratings of different orientations, recorded\n", " through Calcium imaginge. The responses have been normalized by\n", - " spontanous levels of activity and then z-scored over stimuli, so\n", + " spontaneous levels of activity and then z-scored over stimuli, so\n", " expect negative numbers. 
They have also been binned and averaged\n", " to each degree of orientation.\n", "\n", diff --git a/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial4.ipynb b/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial4.ipynb index ecb3d607ff..ad4ed18501 100644 --- a/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial4.ipynb +++ b/tutorials/W1D5_DeepLearning/student/W1D5_Tutorial4.ipynb @@ -395,7 +395,7 @@ " to ~4,000 stimulus gratings of different orientations, recorded\n", " through Calcium imaginge. The responses have been normalized by\n", " spontaneous levels of activity and then z-scored over stimuli, so\n", - " expect negative numbers. The repsonses were split into train and\n", + " expect negative numbers. The responses were split into train and\n", " test and then each set were averaged in bins of 6 degrees.\n", "\n", " This function returns the relevant data (neural responses and\n", @@ -471,7 +471,7 @@ "\n", " \"\"\"\n", " bins = np.linspace(0, 360, n_classes + 1)\n", - " return torch.tensor(np.digitize(ori.squeeze(), bins)) - 1 # minus 1 to accomodate Python indexing\n", + " return torch.tensor(np.digitize(ori.squeeze(), bins)) - 1 # minus 1 to accommodate Python indexing\n", "\n", "def grating(angle, sf=1 / 28, res=0.1, patch=False):\n", " \"\"\"Generate oriented grating stimulus\n", diff --git a/tutorials/W2D1_ModelingPractice/W2D1_Tutorial1.ipynb b/tutorials/W2D1_ModelingPractice/W2D1_Tutorial1.ipynb index 6f153a4866..ada26a43c3 100644 --- a/tutorials/W2D1_ModelingPractice/W2D1_Tutorial1.ipynb +++ b/tutorials/W2D1_ModelingPractice/W2D1_Tutorial1.ipynb @@ -1184,7 +1184,7 @@ "\n", "\n", "where *S* is the illusion strength and *N* is the noise level, and *k* is a free parameter.\n", - ">we could simply use the frequency of occurance across repetitions as the \"strength of the illusion\"\n", + ">we could simply use the frequency of occurrence across repetitions as the \"strength of the illusion\"\n", "\n", "We would get the noise as the standard deviation of *v(t)*, 
i.e.\n", "\n", diff --git a/tutorials/W2D1_ModelingPractice/instructor/W2D1_Tutorial1.ipynb b/tutorials/W2D1_ModelingPractice/instructor/W2D1_Tutorial1.ipynb index 4e74f50c8f..ffafd828ad 100644 --- a/tutorials/W2D1_ModelingPractice/instructor/W2D1_Tutorial1.ipynb +++ b/tutorials/W2D1_ModelingPractice/instructor/W2D1_Tutorial1.ipynb @@ -1184,7 +1184,7 @@ "\n", "\n", "where *S* is the illusion strength and *N* is the noise level, and *k* is a free parameter.\n", - ">we could simply use the frequency of occurance across repetitions as the \"strength of the illusion\"\n", + ">we could simply use the frequency of occurrence across repetitions as the \"strength of the illusion\"\n", "\n", "We would get the noise as the standard deviation of *v(t)*, i.e.\n", "\n", diff --git a/tutorials/W2D1_ModelingPractice/student/W2D1_Tutorial1.ipynb b/tutorials/W2D1_ModelingPractice/student/W2D1_Tutorial1.ipynb index 7dba6fa9ab..0aa15898f9 100644 --- a/tutorials/W2D1_ModelingPractice/student/W2D1_Tutorial1.ipynb +++ b/tutorials/W2D1_ModelingPractice/student/W2D1_Tutorial1.ipynb @@ -1184,7 +1184,7 @@ "\n", "\n", "where *S* is the illusion strength and *N* is the noise level, and *k* is a free parameter.\n", - ">we could simply use the frequency of occurance across repetitions as the \"strength of the illusion\"\n", + ">we could simply use the frequency of occurrence across repetitions as the \"strength of the illusion\"\n", "\n", "We would get the noise as the standard deviation of *v(t)*, i.e.\n", "\n", diff --git a/tutorials/W3D1_BayesianDecisions/W3D1_Tutorial1.ipynb b/tutorials/W3D1_BayesianDecisions/W3D1_Tutorial1.ipynb index 00009ae7b1..c9fdec6e0b 100644 --- a/tutorials/W3D1_BayesianDecisions/W3D1_Tutorial1.ipynb +++ b/tutorials/W3D1_BayesianDecisions/W3D1_Tutorial1.ipynb @@ -902,7 +902,7 @@ "| s = Left | +2 | -3 |\n", "| s = right | -2 | +1 |\n", "\n", - "To use possible gains and losses to choose an action, we calculate the **expected utility** of that action by weighing these 
utilities with the probability of that state occuring. This allows us to choose actions by taking probabilities of events into account: we don't care if the outcome of an action-state pair is a loss if the probability of that state is very low. We can formalize this as:\n", + "To use possible gains and losses to choose an action, we calculate the **expected utility** of that action by weighing these utilities with the probability of that state occurring. This allows us to choose actions by taking probabilities of events into account: we don't care if the outcome of an action-state pair is a loss if the probability of that state is very low. We can formalize this as:\n", "\n", "\\begin{equation}\n", "\\text{Expected utility of action a} = \\sum_{s}U(s,a)P(s)\n", @@ -1490,7 +1490,7 @@ "\n", "We will think of this in two different ways.\n", "\n", - "In the first math exercise, you will think about the case where you know the joint probabilities of two variables and want to figure out the probability of just one variable. To make this explicit, let's assume that a fish has a color that is either gold or silver (our first variable) and a size that is either small or large (our second). We could write out the the **joint probabilities**: the probability of both specific attributes occuring together. For example, the probability of a fish being small and silver, $P(X = \\textrm{small}, Y = \\textrm{silver})$, is 0.4. The following table summarizes our joint probabilities:\n", + "In the first math exercise, you will think about the case where you know the joint probabilities of two variables and want to figure out the probability of just one variable. To make this explicit, let's assume that a fish has a color that is either gold or silver (our first variable) and a size that is either small or large (our second). We could write out the the **joint probabilities**: the probability of both specific attributes occurring together. 
For example, the probability of a fish being small and silver, $P(X = \\textrm{small}, Y = \\textrm{silver})$, is 0.4. The following table summarizes our joint probabilities:\n", "\n", "| P(X, Y) | Y = silver | Y = gold |\n", "| -------------- |-------------|-----------|\n", diff --git a/tutorials/W3D1_BayesianDecisions/W3D1_Tutorial2.ipynb b/tutorials/W3D1_BayesianDecisions/W3D1_Tutorial2.ipynb index ed3e411878..705a9e591b 100644 --- a/tutorials/W3D1_BayesianDecisions/W3D1_Tutorial2.ipynb +++ b/tutorials/W3D1_BayesianDecisions/W3D1_Tutorial2.ipynb @@ -1413,7 +1413,7 @@ "\n", "One distribution we will use throughout this tutorial is the **Gaussian distribution**, which is also sometimes called the normal distribution.\n", "\n", - "This is a special, and commonly used, distribution for a couple reasons. It is actually the focus of one of the most important theorems in statistics: the Central Limit Theorem. This theorem tells us that if you sum a large number of samples of a variable, that sum is normally distributed *no matter what* the original distribution over a variable was. This is a bit too in-depth for us to get into now but check out links in the Bonus for more information. Additionally, Gaussians have some really nice mathematical properties that permit simple closed-form solutions to several important problems. As we will see later in this tutorial, we can extend Gaussians to be even more flexible and well approximate other distributions using mixtures of Gaussians. In short, the Gaussian is probably the most important continous distribution to understand and use.\n", + "This is a special, and commonly used, distribution for a couple reasons. It is actually the focus of one of the most important theorems in statistics: the Central Limit Theorem. This theorem tells us that if you sum a large number of samples of a variable, that sum is normally distributed *no matter what* the original distribution over a variable was. 
This is a bit too in-depth for us to get into now but check out links in the Bonus for more information. Additionally, Gaussians have some really nice mathematical properties that permit simple closed-form solutions to several important problems. As we will see later in this tutorial, we can extend Gaussians to be even more flexible and well approximate other distributions using mixtures of Gaussians. In short, the Gaussian is probably the most important continuous distribution to understand and use.\n", "\n", "Gaussians have two parameters. The **mean** $\\mu$, which sets the location of its center. Its \"scale\" or spread is controlled by its **standard deviation** $\\sigma$ or its square, the **variance** $\\sigma^2$. These can be a bit easy to mix up: make sure you are careful about whether you are referring to/using standard deviation or variance.\n", "
" @@ -1618,7 +1618,7 @@ "a &= \\frac{\\sigma_{1}^{-2}}{\\sigma_{1}^{-2} + \\sigma_{2}^{-2}}\n", "\\end{align}\n", "\n", - "This may look confusing but keep in mind that the information in a Gaussian is the inverse of its variance: $\\frac{1}{\\sigma^2}$. Basically, when multiplying Gaussians, the mean of the resulting Gaussian is a weighted average of the original means, where the weights are proportional to the amount of information of that Gaussian. The information in the resulting Gaussian is equal to the sum of informations of the original two. We'll dive into this in the next demo.\n", + "This may look confusing but keep in mind that the information in a Gaussian is the inverse of its variance: $\\frac{1}{\\sigma^2}$. Basically, when multiplying Gaussians, the mean of the resulting Gaussian is a weighted average of the original means, where the weights are proportional to the amount of information of that Gaussian. The information in the resulting Gaussian is equal to the sum of information of the original two. We'll dive into this in the next demo.\n", "
" ] }, @@ -2752,7 +2752,7 @@ "&\\propto \\mathcal{N}(\\mu_{likelihood},\\sigma_{likelihood}^2) \\times \\mathcal{N}(\\mu_{prior},\\sigma_{prior}^2)\n", "\\end{align}\n", "\n", - "We get the parameters of the posterior from multiplying the Gaussians, just as we did in Secton 2.2." + "We get the parameters of the posterior from multiplying the Gaussians, just as we did in Section 2.2." ] }, { @@ -2850,7 +2850,7 @@ "source": [ "### Interactive Demo 5.2: Prior exploration\n", "\n", - "What would happen if we had a different prior distribution for Astrocat's location? Bayes' Rule works exactly the same way if our prior is not a Guassian (though the analytical solution may be far more complex or impossible). Let's look at how the posterior behaves if we have a different prior over Astrocat's location.\n", + "What would happen if we had a different prior distribution for Astrocat's location? Bayes' Rule works exactly the same way if our prior is not a Gaussian (though the analytical solution may be far more complex or impossible). Let's look at how the posterior behaves if we have a different prior over Astrocat's location.\n", "\n", "Consider the following questions:\n", "\n", diff --git a/tutorials/W3D1_BayesianDecisions/instructor/W3D1_Tutorial1.ipynb b/tutorials/W3D1_BayesianDecisions/instructor/W3D1_Tutorial1.ipynb index 6029a9669a..3c1b8ee3e4 100644 --- a/tutorials/W3D1_BayesianDecisions/instructor/W3D1_Tutorial1.ipynb +++ b/tutorials/W3D1_BayesianDecisions/instructor/W3D1_Tutorial1.ipynb @@ -902,7 +902,7 @@ "| s = Left | +2 | -3 |\n", "| s = right | -2 | +1 |\n", "\n", - "To use possible gains and losses to choose an action, we calculate the **expected utility** of that action by weighing these utilities with the probability of that state occuring. This allows us to choose actions by taking probabilities of events into account: we don't care if the outcome of an action-state pair is a loss if the probability of that state is very low. 
We can formalize this as:\n", + "To use possible gains and losses to choose an action, we calculate the **expected utility** of that action by weighing these utilities with the probability of that state occurring. This allows us to choose actions by taking probabilities of events into account: we don't care if the outcome of an action-state pair is a loss if the probability of that state is very low. We can formalize this as:\n", "\n", "\\begin{equation}\n", "\\text{Expected utility of action a} = \\sum_{s}U(s,a)P(s)\n", @@ -1490,7 +1490,7 @@ "\n", "We will think of this in two different ways.\n", "\n", - "In the first math exercise, you will think about the case where you know the joint probabilities of two variables and want to figure out the probability of just one variable. To make this explicit, let's assume that a fish has a color that is either gold or silver (our first variable) and a size that is either small or large (our second). We could write out the the **joint probabilities**: the probability of both specific attributes occuring together. For example, the probability of a fish being small and silver, $P(X = \\textrm{small}, Y = \\textrm{silver})$, is 0.4. The following table summarizes our joint probabilities:\n", + "In the first math exercise, you will think about the case where you know the joint probabilities of two variables and want to figure out the probability of just one variable. To make this explicit, let's assume that a fish has a color that is either gold or silver (our first variable) and a size that is either small or large (our second). We could write out the the **joint probabilities**: the probability of both specific attributes occurring together. For example, the probability of a fish being small and silver, $P(X = \\textrm{small}, Y = \\textrm{silver})$, is 0.4. 
The following table summarizes our joint probabilities:\n", "\n", "| P(X, Y) | Y = silver | Y = gold |\n", "| -------------- |-------------|-----------|\n", diff --git a/tutorials/W3D1_BayesianDecisions/instructor/W3D1_Tutorial2.ipynb b/tutorials/W3D1_BayesianDecisions/instructor/W3D1_Tutorial2.ipynb index b5ab8c685b..2bed6ee1bc 100644 --- a/tutorials/W3D1_BayesianDecisions/instructor/W3D1_Tutorial2.ipynb +++ b/tutorials/W3D1_BayesianDecisions/instructor/W3D1_Tutorial2.ipynb @@ -1413,7 +1413,7 @@ "\n", "One distribution we will use throughout this tutorial is the **Gaussian distribution**, which is also sometimes called the normal distribution.\n", "\n", - "This is a special, and commonly used, distribution for a couple reasons. It is actually the focus of one of the most important theorems in statistics: the Central Limit Theorem. This theorem tells us that if you sum a large number of samples of a variable, that sum is normally distributed *no matter what* the original distribution over a variable was. This is a bit too in-depth for us to get into now but check out links in the Bonus for more information. Additionally, Gaussians have some really nice mathematical properties that permit simple closed-form solutions to several important problems. As we will see later in this tutorial, we can extend Gaussians to be even more flexible and well approximate other distributions using mixtures of Gaussians. In short, the Gaussian is probably the most important continous distribution to understand and use.\n", + "This is a special, and commonly used, distribution for a couple reasons. It is actually the focus of one of the most important theorems in statistics: the Central Limit Theorem. This theorem tells us that if you sum a large number of samples of a variable, that sum is normally distributed *no matter what* the original distribution over a variable was. This is a bit too in-depth for us to get into now but check out links in the Bonus for more information. 
Additionally, Gaussians have some really nice mathematical properties that permit simple closed-form solutions to several important problems. As we will see later in this tutorial, we can extend Gaussians to be even more flexible and well approximate other distributions using mixtures of Gaussians. In short, the Gaussian is probably the most important continuous distribution to understand and use.\n", "\n", "Gaussians have two parameters. The **mean** $\\mu$, which sets the location of its center. Its \"scale\" or spread is controlled by its **standard deviation** $\\sigma$ or its square, the **variance** $\\sigma^2$. These can be a bit easy to mix up: make sure you are careful about whether you are referring to/using standard deviation or variance.\n", "
" @@ -1618,7 +1618,7 @@ "a &= \\frac{\\sigma_{1}^{-2}}{\\sigma_{1}^{-2} + \\sigma_{2}^{-2}}\n", "\\end{align}\n", "\n", - "This may look confusing but keep in mind that the information in a Gaussian is the inverse of its variance: $\\frac{1}{\\sigma^2}$. Basically, when multiplying Gaussians, the mean of the resulting Gaussian is a weighted average of the original means, where the weights are proportional to the amount of information of that Gaussian. The information in the resulting Gaussian is equal to the sum of informations of the original two. We'll dive into this in the next demo.\n", + "This may look confusing but keep in mind that the information in a Gaussian is the inverse of its variance: $\\frac{1}{\\sigma^2}$. Basically, when multiplying Gaussians, the mean of the resulting Gaussian is a weighted average of the original means, where the weights are proportional to the amount of information of that Gaussian. The information in the resulting Gaussian is equal to the sum of information of the original two. We'll dive into this in the next demo.\n", "" ] }, @@ -2752,7 +2752,7 @@ "&\\propto \\mathcal{N}(\\mu_{likelihood},\\sigma_{likelihood}^2) \\times \\mathcal{N}(\\mu_{prior},\\sigma_{prior}^2)\n", "\\end{align}\n", "\n", - "We get the parameters of the posterior from multiplying the Gaussians, just as we did in Secton 2.2." + "We get the parameters of the posterior from multiplying the Gaussians, just as we did in Section 2.2." ] }, { @@ -2850,7 +2850,7 @@ "source": [ "### Interactive Demo 5.2: Prior exploration\n", "\n", - "What would happen if we had a different prior distribution for Astrocat's location? Bayes' Rule works exactly the same way if our prior is not a Guassian (though the analytical solution may be far more complex or impossible). Let's look at how the posterior behaves if we have a different prior over Astrocat's location.\n", + "What would happen if we had a different prior distribution for Astrocat's location? 
Bayes' Rule works exactly the same way if our prior is not a Gaussian (though the analytical solution may be far more complex or impossible). Let's look at how the posterior behaves if we have a different prior over Astrocat's location.\n", "\n", "Consider the following questions:\n", "\n", diff --git a/tutorials/W3D1_BayesianDecisions/student/W3D1_Tutorial1.ipynb b/tutorials/W3D1_BayesianDecisions/student/W3D1_Tutorial1.ipynb index d76aa34b54..5d3bbebcbf 100644 --- a/tutorials/W3D1_BayesianDecisions/student/W3D1_Tutorial1.ipynb +++ b/tutorials/W3D1_BayesianDecisions/student/W3D1_Tutorial1.ipynb @@ -902,7 +902,7 @@ "| s = Left | +2 | -3 |\n", "| s = right | -2 | +1 |\n", "\n", - "To use possible gains and losses to choose an action, we calculate the **expected utility** of that action by weighing these utilities with the probability of that state occuring. This allows us to choose actions by taking probabilities of events into account: we don't care if the outcome of an action-state pair is a loss if the probability of that state is very low. We can formalize this as:\n", + "To use possible gains and losses to choose an action, we calculate the **expected utility** of that action by weighing these utilities with the probability of that state occurring. This allows us to choose actions by taking probabilities of events into account: we don't care if the outcome of an action-state pair is a loss if the probability of that state is very low. We can formalize this as:\n", "\n", "\\begin{equation}\n", "\\text{Expected utility of action a} = \\sum_{s}U(s,a)P(s)\n", @@ -1436,7 +1436,7 @@ "\n", "We will think of this in two different ways.\n", "\n", - "In the first math exercise, you will think about the case where you know the joint probabilities of two variables and want to figure out the probability of just one variable. 
To make this explicit, let's assume that a fish has a color that is either gold or silver (our first variable) and a size that is either small or large (our second). We could write out the the **joint probabilities**: the probability of both specific attributes occuring together. For example, the probability of a fish being small and silver, $P(X = \\textrm{small}, Y = \\textrm{silver})$, is 0.4. The following table summarizes our joint probabilities:\n", + "In the first math exercise, you will think about the case where you know the joint probabilities of two variables and want to figure out the probability of just one variable. To make this explicit, let's assume that a fish has a color that is either gold or silver (our first variable) and a size that is either small or large (our second). We could write out the the **joint probabilities**: the probability of both specific attributes occurring together. For example, the probability of a fish being small and silver, $P(X = \\textrm{small}, Y = \\textrm{silver})$, is 0.4. The following table summarizes our joint probabilities:\n", "\n", "| P(X, Y) | Y = silver | Y = gold |\n", "| -------------- |-------------|-----------|\n", diff --git a/tutorials/W3D1_BayesianDecisions/student/W3D1_Tutorial2.ipynb b/tutorials/W3D1_BayesianDecisions/student/W3D1_Tutorial2.ipynb index e37f482ac3..69ae8ea319 100644 --- a/tutorials/W3D1_BayesianDecisions/student/W3D1_Tutorial2.ipynb +++ b/tutorials/W3D1_BayesianDecisions/student/W3D1_Tutorial2.ipynb @@ -1413,7 +1413,7 @@ "\n", "One distribution we will use throughout this tutorial is the **Gaussian distribution**, which is also sometimes called the normal distribution.\n", "\n", - "This is a special, and commonly used, distribution for a couple reasons. It is actually the focus of one of the most important theorems in statistics: the Central Limit Theorem. 
This theorem tells us that if you sum a large number of samples of a variable, that sum is normally distributed *no matter what* the original distribution over a variable was. This is a bit too in-depth for us to get into now but check out links in the Bonus for more information. Additionally, Gaussians have some really nice mathematical properties that permit simple closed-form solutions to several important problems. As we will see later in this tutorial, we can extend Gaussians to be even more flexible and well approximate other distributions using mixtures of Gaussians. In short, the Gaussian is probably the most important continous distribution to understand and use.\n", + "This is a special, and commonly used, distribution for a couple reasons. It is actually the focus of one of the most important theorems in statistics: the Central Limit Theorem. This theorem tells us that if you sum a large number of samples of a variable, that sum is normally distributed *no matter what* the original distribution over a variable was. This is a bit too in-depth for us to get into now but check out links in the Bonus for more information. Additionally, Gaussians have some really nice mathematical properties that permit simple closed-form solutions to several important problems. As we will see later in this tutorial, we can extend Gaussians to be even more flexible and well approximate other distributions using mixtures of Gaussians. In short, the Gaussian is probably the most important continuous distribution to understand and use.\n", "\n", "Gaussians have two parameters. The **mean** $\\mu$, which sets the location of its center. Its \"scale\" or spread is controlled by its **standard deviation** $\\sigma$ or its square, the **variance** $\\sigma^2$. 
These can be a bit easy to mix up: make sure you are careful about whether you are referring to/using standard deviation or variance.\n", "" @@ -1605,7 +1605,7 @@ "a &= \\frac{\\sigma_{1}^{-2}}{\\sigma_{1}^{-2} + \\sigma_{2}^{-2}}\n", "\\end{align}\n", "\n", - "This may look confusing but keep in mind that the information in a Gaussian is the inverse of its variance: $\\frac{1}{\\sigma^2}$. Basically, when multiplying Gaussians, the mean of the resulting Gaussian is a weighted average of the original means, where the weights are proportional to the amount of information of that Gaussian. The information in the resulting Gaussian is equal to the sum of informations of the original two. We'll dive into this in the next demo.\n", + "This may look confusing but keep in mind that the information in a Gaussian is the inverse of its variance: $\\frac{1}{\\sigma^2}$. Basically, when multiplying Gaussians, the mean of the resulting Gaussian is a weighted average of the original means, where the weights are proportional to the amount of information of that Gaussian. The information in the resulting Gaussian is equal to the sum of information of the original two. We'll dive into this in the next demo.\n", "" ] }, @@ -2642,7 +2642,7 @@ "&\\propto \\mathcal{N}(\\mu_{likelihood},\\sigma_{likelihood}^2) \\times \\mathcal{N}(\\mu_{prior},\\sigma_{prior}^2)\n", "\\end{align}\n", "\n", - "We get the parameters of the posterior from multiplying the Gaussians, just as we did in Secton 2.2." + "We get the parameters of the posterior from multiplying the Gaussians, just as we did in Section 2.2." ] }, { @@ -2721,7 +2721,7 @@ "source": [ "### Interactive Demo 5.2: Prior exploration\n", "\n", - "What would happen if we had a different prior distribution for Astrocat's location? Bayes' Rule works exactly the same way if our prior is not a Guassian (though the analytical solution may be far more complex or impossible). 
Let's look at how the posterior behaves if we have a different prior over Astrocat's location.\n", + "What would happen if we had a different prior distribution for Astrocat's location? Bayes' Rule works exactly the same way if our prior is not a Gaussian (though the analytical solution may be far more complex or impossible). Let's look at how the posterior behaves if we have a different prior over Astrocat's location.\n", "\n", "Consider the following questions:\n", "\n", diff --git a/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial1.ipynb b/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial1.ipynb index 685977bcc1..fd1285aa4b 100644 --- a/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial1.ipynb +++ b/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial1.ipynb @@ -548,7 +548,7 @@ "\n", "$$\\Delta_t=b+c\\epsilon_t$$\n", "\n", - "The first term, $b$, is a consistant value and equals $b=2\\mu^2/\\sigma^2$. This term favors the actual hidden state. The second term, $c\\epsilon_t$ where $\\epsilon_t\\sim\\mathcal{N}(0,1)$, is a standard random variable which is scaled by the diffusion $c=2\\mu/\\sigma$. You can work through proving this in the bonus exercise 0 below if you wish!\n", + "The first term, $b$, is a consistent value and equals $b=2\\mu^2/\\sigma^2$. This term favors the actual hidden state. The second term, $c\\epsilon_t$ where $\\epsilon_t\\sim\\mathcal{N}(0,1)$, is a standard random variable which is scaled by the diffusion $c=2\\mu/\\sigma$. You can work through proving this in the bonus exercise 0 below if you wish!\n", "\n", "The accumulation of evidence will thus \"drift\" toward one outcome, while \"diffusing\" in random directions, hence the term \"drift-diffusion model\" (DDM). 
The process is most likely (but not guaranteed) to reach the correct outcome eventually.\n", "\n", diff --git a/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial2.ipynb b/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial2.ipynb index 4ac1bfe9b0..3e89088aaf 100644 --- a/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial2.ipynb +++ b/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial2.ipynb @@ -1474,7 +1474,7 @@ " predictive_probs = np.zeros((T,2))\n", " likelihoods = np.zeros((T,2))\n", " posterior_probs = np.zeros((T, 2))\n", - " # Generate an measurement trajectory condtioned on that latent state x is always 1\n", + " # Generate an measurement trajectory conditioned on that latent state x is always 1\n", " if data is not None:\n", " M = data\n", " else:\n", diff --git a/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial3.ipynb b/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial3.ipynb index 805c208d7a..9a7b037952 100644 --- a/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial3.ipynb +++ b/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial3.ipynb @@ -1857,7 +1857,7 @@ " # (i.e., multiply gaussians from today's prior and likelihood)\n", " likelihood = ...\n", "\n", - " # Step 2a: To find the posterior variance, add informations (inverse variances) of prior and likelihood\n", + " # Step 2a: To find the posterior variance, add information (inverse variances) of prior and likelihood\n", " info_prior = 1/todays_prior.cov\n", " info_likelihood = 1/likelihood.cov\n", " info_posterior = ...\n", @@ -1937,7 +1937,7 @@ " # (i.e., multiply gaussians from today's prior and likelihood)\n", " likelihood = gaussian(m[i], measurement_noise_cov)\n", "\n", - " # Step 2a: To find the posterior variance, add informations (inverse variances) of prior and likelihood\n", + " # Step 2a: To find the posterior variance, add information (inverse variances) of prior and likelihood\n", " info_prior = 1/todays_prior.cov\n", " info_likelihood = 1/likelihood.cov\n", " info_posterior = info_prior + info_likelihood\n", diff --git 
a/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial4.ipynb b/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial4.ipynb index f3384bd0b7..a8204b94d0 100644 --- a/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial4.ipynb +++ b/tutorials/W3D2_HiddenDynamics/W3D2_Tutorial4.ipynb @@ -1015,7 +1015,7 @@ "source": [ "## Interactive Demo 2: Tracking Eye Gaze\n", "\n", - "We have three stimulus images and five different subjects' gaze data. Each subject fixated in the center of the screen before the image appeared, then had a few seconds to freely look around. You can use the widget below to see how different subjects visually scanned the presented image. A subject ID of -1 will show the stimulus images without any overlayed gaze trace.\n", + "We have three stimulus images and five different subjects' gaze data. Each subject fixated in the center of the screen before the image appeared, then had a few seconds to freely look around. You can use the widget below to see how different subjects visually scanned the presented image. A subject ID of -1 will show the stimulus images without any overlaid gaze trace.\n", "\n", "Note that the images are rescaled below for display purposes, they were in their original aspect ratio during the task itself." ] @@ -1163,7 +1163,7 @@ "\n", "We can now use this model to smooth the observed data from the subject. In addition to the source image, we can also see how this model will work with the gaze recorded by the same subject on the other images as well, or even with different subjects.\n", "\n", - "Below are the three stimulus images overlayed with recorded gaze in magenta and smoothed state from the filter in green, with gaze begin (orange triangle) and gaze end (orange square) markers." + "Below are the three stimulus images overlaid with recorded gaze in magenta and smoothed state from the filter in green, with gaze begin (orange triangle) and gaze end (orange square) markers." 
] }, { diff --git a/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial1.ipynb b/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial1.ipynb index 1ab4ecdf55..78b3bfd406 100644 --- a/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial1.ipynb +++ b/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial1.ipynb @@ -548,7 +548,7 @@ "\n", "$$\\Delta_t=b+c\\epsilon_t$$\n", "\n", - "The first term, $b$, is a consistant value and equals $b=2\\mu^2/\\sigma^2$. This term favors the actual hidden state. The second term, $c\\epsilon_t$ where $\\epsilon_t\\sim\\mathcal{N}(0,1)$, is a standard random variable which is scaled by the diffusion $c=2\\mu/\\sigma$. You can work through proving this in the bonus exercise 0 below if you wish!\n", + "The first term, $b$, is a consistent value and equals $b=2\\mu^2/\\sigma^2$. This term favors the actual hidden state. The second term, $c\\epsilon_t$ where $\\epsilon_t\\sim\\mathcal{N}(0,1)$, is a standard random variable which is scaled by the diffusion $c=2\\mu/\\sigma$. You can work through proving this in the bonus exercise 0 below if you wish!\n", "\n", "The accumulation of evidence will thus \"drift\" toward one outcome, while \"diffusing\" in random directions, hence the term \"drift-diffusion model\" (DDM). 
The process is most likely (but not guaranteed) to reach the correct outcome eventually.\n", "\n", diff --git a/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial2.ipynb b/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial2.ipynb index d5c01048bd..7c47de267a 100644 --- a/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial2.ipynb +++ b/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial2.ipynb @@ -1476,7 +1476,7 @@ " predictive_probs = np.zeros((T,2))\n", " likelihoods = np.zeros((T,2))\n", " posterior_probs = np.zeros((T, 2))\n", - " # Generate an measurement trajectory condtioned on that latent state x is always 1\n", + " # Generate an measurement trajectory conditioned on that latent state x is always 1\n", " if data is not None:\n", " M = data\n", " else:\n", diff --git a/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial3.ipynb b/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial3.ipynb index f18174ffff..7dd25dc810 100644 --- a/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial3.ipynb +++ b/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial3.ipynb @@ -1863,7 +1863,7 @@ " # (i.e., multiply gaussians from today's prior and likelihood)\n", " likelihood = ...\n", "\n", - " # Step 2a: To find the posterior variance, add informations (inverse variances) of prior and likelihood\n", + " # Step 2a: To find the posterior variance, add information (inverse variances) of prior and likelihood\n", " info_prior = 1/todays_prior.cov\n", " info_likelihood = 1/likelihood.cov\n", " info_posterior = ...\n", @@ -1945,7 +1945,7 @@ " # (i.e., multiply gaussians from today's prior and likelihood)\n", " likelihood = gaussian(m[i], measurement_noise_cov)\n", "\n", - " # Step 2a: To find the posterior variance, add informations (inverse variances) of prior and likelihood\n", + " # Step 2a: To find the posterior variance, add information (inverse variances) of prior and likelihood\n", " info_prior = 1/todays_prior.cov\n", " info_likelihood = 
1/likelihood.cov\n", " info_posterior = info_prior + info_likelihood\n", diff --git a/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial4.ipynb b/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial4.ipynb index 9faef7e1ed..42a70db035 100644 --- a/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial4.ipynb +++ b/tutorials/W3D2_HiddenDynamics/instructor/W3D2_Tutorial4.ipynb @@ -1019,7 +1019,7 @@ "source": [ "## Interactive Demo 2: Tracking Eye Gaze\n", "\n", - "We have three stimulus images and five different subjects' gaze data. Each subject fixated in the center of the screen before the image appeared, then had a few seconds to freely look around. You can use the widget below to see how different subjects visually scanned the presented image. A subject ID of -1 will show the stimulus images without any overlayed gaze trace.\n", + "We have three stimulus images and five different subjects' gaze data. Each subject fixated in the center of the screen before the image appeared, then had a few seconds to freely look around. You can use the widget below to see how different subjects visually scanned the presented image. A subject ID of -1 will show the stimulus images without any overlaid gaze trace.\n", "\n", "Note that the images are rescaled below for display purposes, they were in their original aspect ratio during the task itself." ] @@ -1167,7 +1167,7 @@ "\n", "We can now use this model to smooth the observed data from the subject. In addition to the source image, we can also see how this model will work with the gaze recorded by the same subject on the other images as well, or even with different subjects.\n", "\n", - "Below are the three stimulus images overlayed with recorded gaze in magenta and smoothed state from the filter in green, with gaze begin (orange triangle) and gaze end (orange square) markers." 
+ "Below are the three stimulus images overlaid with recorded gaze in magenta and smoothed state from the filter in green, with gaze begin (orange triangle) and gaze end (orange square) markers." ] }, { diff --git a/tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial3_Solution_6c26b2f4.py b/tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial3_Solution_6c26b2f4.py index 0d74c78370..e7607a967e 100644 --- a/tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial3_Solution_6c26b2f4.py +++ b/tutorials/W3D2_HiddenDynamics/solutions/W3D2_Tutorial3_Solution_6c26b2f4.py @@ -47,7 +47,7 @@ # (i.e., multiply gaussians from today's prior and likelihood) likelihood = gaussian(m[i], measurement_noise_cov) - # Step 2a: To find the posterior variance, add informations (inverse variances) of prior and likelihood + # Step 2a: To find the posterior variance, add information (inverse variances) of prior and likelihood info_prior = 1/todays_prior.cov info_likelihood = 1/likelihood.cov info_posterior = info_prior + info_likelihood diff --git a/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial1.ipynb b/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial1.ipynb index 623fd5e81a..2f6eb76e9e 100644 --- a/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial1.ipynb +++ b/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial1.ipynb @@ -548,7 +548,7 @@ "\n", "$$\\Delta_t=b+c\\epsilon_t$$\n", "\n", - "The first term, $b$, is a consistant value and equals $b=2\\mu^2/\\sigma^2$. This term favors the actual hidden state. The second term, $c\\epsilon_t$ where $\\epsilon_t\\sim\\mathcal{N}(0,1)$, is a standard random variable which is scaled by the diffusion $c=2\\mu/\\sigma$. You can work through proving this in the bonus exercise 0 below if you wish!\n", + "The first term, $b$, is a consistent value and equals $b=2\\mu^2/\\sigma^2$. This term favors the actual hidden state. 
The second term, $c\\epsilon_t$ where $\\epsilon_t\\sim\\mathcal{N}(0,1)$, is a standard random variable which is scaled by the diffusion $c=2\\mu/\\sigma$. You can work through proving this in the bonus exercise 0 below if you wish!\n", "\n", "The accumulation of evidence will thus \"drift\" toward one outcome, while \"diffusing\" in random directions, hence the term \"drift-diffusion model\" (DDM). The process is most likely (but not guaranteed) to reach the correct outcome eventually.\n", "\n", diff --git a/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial2.ipynb b/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial2.ipynb index 4b734820e4..e8f3aa2fe9 100644 --- a/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial2.ipynb +++ b/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial2.ipynb @@ -1359,7 +1359,7 @@ " predictive_probs = np.zeros((T,2))\n", " likelihoods = np.zeros((T,2))\n", " posterior_probs = np.zeros((T, 2))\n", - " # Generate an measurement trajectory condtioned on that latent state x is always 1\n", + " # Generate an measurement trajectory conditioned on that latent state x is always 1\n", " if data is not None:\n", " M = data\n", " else:\n", diff --git a/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial3.ipynb b/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial3.ipynb index 60670d697c..d9243ed027 100644 --- a/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial3.ipynb +++ b/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial3.ipynb @@ -1730,7 +1730,7 @@ " # (i.e., multiply gaussians from today's prior and likelihood)\n", " likelihood = ...\n", "\n", - " # Step 2a: To find the posterior variance, add informations (inverse variances) of prior and likelihood\n", + " # Step 2a: To find the posterior variance, add information (inverse variances) of prior and likelihood\n", " info_prior = 1/todays_prior.cov\n", " info_likelihood = 1/likelihood.cov\n", " info_posterior = ...\n", diff --git a/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial4.ipynb 
b/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial4.ipynb index 73e3268d38..f2c51d7e1f 100644 --- a/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial4.ipynb +++ b/tutorials/W3D2_HiddenDynamics/student/W3D2_Tutorial4.ipynb @@ -929,7 +929,7 @@ "source": [ "## Interactive Demo 2: Tracking Eye Gaze\n", "\n", - "We have three stimulus images and five different subjects' gaze data. Each subject fixated in the center of the screen before the image appeared, then had a few seconds to freely look around. You can use the widget below to see how different subjects visually scanned the presented image. A subject ID of -1 will show the stimulus images without any overlayed gaze trace.\n", + "We have three stimulus images and five different subjects' gaze data. Each subject fixated in the center of the screen before the image appeared, then had a few seconds to freely look around. You can use the widget below to see how different subjects visually scanned the presented image. A subject ID of -1 will show the stimulus images without any overlaid gaze trace.\n", "\n", "Note that the images are rescaled below for display purposes, they were in their original aspect ratio during the task itself." ] @@ -1077,7 +1077,7 @@ "\n", "We can now use this model to smooth the observed data from the subject. In addition to the source image, we can also see how this model will work with the gaze recorded by the same subject on the other images as well, or even with different subjects.\n", "\n", - "Below are the three stimulus images overlayed with recorded gaze in magenta and smoothed state from the filter in green, with gaze begin (orange triangle) and gaze end (orange square) markers." + "Below are the three stimulus images overlaid with recorded gaze in magenta and smoothed state from the filter in green, with gaze begin (orange triangle) and gaze end (orange square) markers." 
] }, { diff --git a/tutorials/W3D3_OptimalControl/W3D3_Tutorial1.ipynb b/tutorials/W3D3_OptimalControl/W3D3_Tutorial1.ipynb index b3e732d7db..cff2679f39 100644 --- a/tutorials/W3D3_OptimalControl/W3D3_Tutorial1.ipynb +++ b/tutorials/W3D3_OptimalControl/W3D3_Tutorial1.ipynb @@ -481,7 +481,7 @@ " Returns:\n", " fish_state (numpy array of int): locations of the fish\n", " loc (numpy array of int): left or right site, 0 for left, and 1 for right\n", - " rwd (numpy array of binary): whether a fish was catched or not\n", + " rwd (numpy array of binary): whether a fish was caught or not\n", " \"\"\"\n", "\n", " _, p_low_rwd, p_high_rwd, _ = self.params\n", @@ -2298,7 +2298,7 @@ "\n", "## Task Description\n", "\n", - "There are two boxes. The box can be in a high-rewarding state ($s=1$), which means that a reward will be delivered with high probabilty $q_{high}$; or the box can be in low-rewarding state ($s=0$), then the reward will be delivered with low probabilty $q_{low}$.\n", + "There are two boxes. The box can be in a high-rewarding state ($s=1$), which means that a reward will be delivered with high probability $q_{high}$; or the box can be in low-rewarding state ($s=0$), then the reward will be delivered with low probability $q_{low}$.\n", "\n", "The states of the two boxes are latent. At a certain time, only one of the sites can be in high-rewarding state, and the other box will be the opposite. The states of the two boxes switches with a certain probability $p_{sw}$. \n", "\n", @@ -2322,7 +2322,7 @@ "we would like to see the relation between the threshold and the value function. \n", "\n", "### Exercise 1: Control for binary HMM\n", - "In this excercise, we generate the dynamics for the binary HMM task as described above. \n", + "In this exercise, we generate the dynamics for the binary HMM task as described above. 
\n", "\n", "# This function is the policy based on threshold\n", "\n", @@ -2374,7 +2374,7 @@ "\n", " if act[t - 1] == 0:\n", " loc[t] = loc[t - 1]\n", - " else: # after weitching, open the new box, deplete if any; then wait a usualy time\n", + " else: # after switching, open the new box, deplete if any; then wait a usual time\n", " loc[t] = 1 - loc[t - 1]\n", "\n", " # new observation\n", @@ -2481,7 +2481,7 @@ " plt.show()\n", "\n", "T = 5000\n", - "p_sw = .95 # state transiton probability\n", + "p_sw = .95 # state transition probability\n", "q_high = .7\n", "q_low = 0 #.2\n", "cost_sw = 1 #int(1/(1-p_sw)) - 5\n", diff --git a/tutorials/W3D3_OptimalControl/W3D3_Tutorial2.ipynb b/tutorials/W3D3_OptimalControl/W3D3_Tutorial2.ipynb index d38fb74753..cc6d6be771 100644 --- a/tutorials/W3D3_OptimalControl/W3D3_Tutorial2.ipynb +++ b/tutorials/W3D3_OptimalControl/W3D3_Tutorial2.ipynb @@ -386,7 +386,7 @@ "\n", "In *open-loop control*, $a_t$ is not a function of $s_t$. In *closed-loop linear control*, $a_t$ is a linear function of the state $s_t$. Specifically, $a_t$ is the control gain, $L_t$, multiplied by $s_t$, i.e., $a_t=L_t s_t$.\n", "\n", - "In the next excercise, you will explore what happens when nothing is controlling the system, when the system is being controlled following an open-loop control policy, and when the system is under closed-loop linear control." + "In the next exercise, you will explore what happens when nothing is controlling the system, when the system is being controlled following an open-loop control policy, and when the system is under closed-loop linear control." 
] }, { @@ -1412,12 +1412,12 @@ "\n", " def dynamics_tracking(self, D, B, L):\n", "\n", - " s = np.zeros(self.T) # states intialization\n", + " s = np.zeros(self.T) # states initialization\n", " s[0] = self.ini_state\n", "\n", " noise = np.sqrt(self.noise_var) * standard_normal_noise\n", "\n", - " a = np.zeros(self.T) # control intialization\n", + " a = np.zeros(self.T) # control initialization\n", " a_bar = np.zeros(self.T)\n", " for t in range(self.T - 1):\n", " a_bar[t] = ( - D * s[t] + self.goal[t + 1]) / B\n", @@ -1845,7 +1845,7 @@ "\n", "1. Visualize the system dynamics $s_t$ in closed-loop control with an arbitrary constant control gain. Vary this control gain.\n", "\n", - "2. Play arround with the remaining sliders. What happens when the process noise is high (low)? How about the measurement noise?\n" + "2. Play around with the remaining sliders. What happens when the process noise is high (low)? How about the measurement noise?\n" ] }, { diff --git a/tutorials/W3D3_OptimalControl/instructor/W3D3_Tutorial1.ipynb b/tutorials/W3D3_OptimalControl/instructor/W3D3_Tutorial1.ipynb index d712409d3e..e998df6a53 100644 --- a/tutorials/W3D3_OptimalControl/instructor/W3D3_Tutorial1.ipynb +++ b/tutorials/W3D3_OptimalControl/instructor/W3D3_Tutorial1.ipynb @@ -481,7 +481,7 @@ " Returns:\n", " fish_state (numpy array of int): locations of the fish\n", " loc (numpy array of int): left or right site, 0 for left, and 1 for right\n", - " rwd (numpy array of binary): whether a fish was catched or not\n", + " rwd (numpy array of binary): whether a fish was caught or not\n", " \"\"\"\n", "\n", " _, p_low_rwd, p_high_rwd, _ = self.params\n", @@ -2304,7 +2304,7 @@ "\n", "## Task Description\n", "\n", - "There are two boxes. 
The box can be in a high-rewarding state ($s=1$), which means that a reward will be delivered with high probabilty $q_{high}$; or the box can be in low-rewarding state ($s=0$), then the reward will be delivered with low probabilty $q_{low}$.\n", + "There are two boxes. The box can be in a high-rewarding state ($s=1$), which means that a reward will be delivered with high probability $q_{high}$; or the box can be in low-rewarding state ($s=0$), then the reward will be delivered with low probability $q_{low}$.\n", "\n", "The states of the two boxes are latent. At a certain time, only one of the sites can be in high-rewarding state, and the other box will be the opposite. The states of the two boxes switches with a certain probability $p_{sw}$. \n", "\n", @@ -2328,7 +2328,7 @@ "we would like to see the relation between the threshold and the value function. \n", "\n", "### Exercise 1: Control for binary HMM\n", - "In this excercise, we generate the dynamics for the binary HMM task as described above. \n", + "In this exercise, we generate the dynamics for the binary HMM task as described above. 
\n", "\n", "# This function is the policy based on threshold\n", "\n", @@ -2380,7 +2380,7 @@ "\n", " if act[t - 1] == 0:\n", " loc[t] = loc[t - 1]\n", - " else: # after weitching, open the new box, deplete if any; then wait a usualy time\n", + " else: # after switching, open the new box, deplete if any; then wait a usual time\n", " loc[t] = 1 - loc[t - 1]\n", "\n", " # new observation\n", @@ -2487,7 +2487,7 @@ " plt.show()\n", "\n", "T = 5000\n", - "p_sw = .95 # state transiton probability\n", + "p_sw = .95 # state transition probability\n", "q_high = .7\n", "q_low = 0 #.2\n", "cost_sw = 1 #int(1/(1-p_sw)) - 5\n", diff --git a/tutorials/W3D3_OptimalControl/instructor/W3D3_Tutorial2.ipynb b/tutorials/W3D3_OptimalControl/instructor/W3D3_Tutorial2.ipynb index 7c710fe05a..0ca23fff79 100644 --- a/tutorials/W3D3_OptimalControl/instructor/W3D3_Tutorial2.ipynb +++ b/tutorials/W3D3_OptimalControl/instructor/W3D3_Tutorial2.ipynb @@ -386,7 +386,7 @@ "\n", "In *open-loop control*, $a_t$ is not a function of $s_t$. In *closed-loop linear control*, $a_t$ is a linear function of the state $s_t$. Specifically, $a_t$ is the control gain, $L_t$, multiplied by $s_t$, i.e., $a_t=L_t s_t$.\n", "\n", - "In the next excercise, you will explore what happens when nothing is controlling the system, when the system is being controlled following an open-loop control policy, and when the system is under closed-loop linear control." + "In the next exercise, you will explore what happens when nothing is controlling the system, when the system is being controlled following an open-loop control policy, and when the system is under closed-loop linear control." 
] }, { @@ -1416,12 +1416,12 @@ "\n", " def dynamics_tracking(self, D, B, L):\n", "\n", - " s = np.zeros(self.T) # states intialization\n", + " s = np.zeros(self.T) # states initialization\n", " s[0] = self.ini_state\n", "\n", " noise = np.sqrt(self.noise_var) * standard_normal_noise\n", "\n", - " a = np.zeros(self.T) # control intialization\n", + " a = np.zeros(self.T) # control initialization\n", " a_bar = np.zeros(self.T)\n", " for t in range(self.T - 1):\n", " a_bar[t] = ( - D * s[t] + self.goal[t + 1]) / B\n", @@ -1849,7 +1849,7 @@ "\n", "1. Visualize the system dynamics $s_t$ in closed-loop control with an arbitrary constant control gain. Vary this control gain.\n", "\n", - "2. Play arround with the remaining sliders. What happens when the process noise is high (low)? How about the measurement noise?\n" + "2. Play around with the remaining sliders. What happens when the process noise is high (low)? How about the measurement noise?\n" ] }, { diff --git a/tutorials/W3D3_OptimalControl/student/W3D3_Tutorial1.ipynb b/tutorials/W3D3_OptimalControl/student/W3D3_Tutorial1.ipynb index 6f77232a2d..022fe44575 100644 --- a/tutorials/W3D3_OptimalControl/student/W3D3_Tutorial1.ipynb +++ b/tutorials/W3D3_OptimalControl/student/W3D3_Tutorial1.ipynb @@ -481,7 +481,7 @@ " Returns:\n", " fish_state (numpy array of int): locations of the fish\n", " loc (numpy array of int): left or right site, 0 for left, and 1 for right\n", - " rwd (numpy array of binary): whether a fish was catched or not\n", + " rwd (numpy array of binary): whether a fish was caught or not\n", " \"\"\"\n", "\n", " _, p_low_rwd, p_high_rwd, _ = self.params\n", @@ -2090,7 +2090,7 @@ "\n", "## Task Description\n", "\n", - "There are two boxes. The box can be in a high-rewarding state ($s=1$), which means that a reward will be delivered with high probabilty $q_{high}$; or the box can be in low-rewarding state ($s=0$), then the reward will be delivered with low probabilty $q_{low}$.\n", + "There are two boxes. 
The box can be in a high-rewarding state ($s=1$), which means that a reward will be delivered with high probability $q_{high}$; or the box can be in low-rewarding state ($s=0$), then the reward will be delivered with low probability $q_{low}$.\n", "\n", "The states of the two boxes are latent. At a certain time, only one of the sites can be in high-rewarding state, and the other box will be the opposite. The states of the two boxes switches with a certain probability $p_{sw}$. \n", "\n", @@ -2114,7 +2114,7 @@ "we would like to see the relation between the threshold and the value function. \n", "\n", "### Exercise 1: Control for binary HMM\n", - "In this excercise, we generate the dynamics for the binary HMM task as described above. \n", + "In this exercise, we generate the dynamics for the binary HMM task as described above. \n", "\n", "# This function is the policy based on threshold\n", "\n", @@ -2166,7 +2166,7 @@ "\n", " if act[t - 1] == 0:\n", " loc[t] = loc[t - 1]\n", - " else: # after weitching, open the new box, deplete if any; then wait a usualy time\n", + " else: # after switching, open the new box, deplete if any; then wait a usual time\n", " loc[t] = 1 - loc[t - 1]\n", "\n", " # new observation\n", @@ -2273,7 +2273,7 @@ " plt.show()\n", "\n", "T = 5000\n", - "p_sw = .95 # state transiton probability\n", + "p_sw = .95 # state transition probability\n", "q_high = .7\n", "q_low = 0 #.2\n", "cost_sw = 1 #int(1/(1-p_sw)) - 5\n", diff --git a/tutorials/W3D3_OptimalControl/student/W3D3_Tutorial2.ipynb b/tutorials/W3D3_OptimalControl/student/W3D3_Tutorial2.ipynb index 1f1f3e3e4e..57cc872e98 100644 --- a/tutorials/W3D3_OptimalControl/student/W3D3_Tutorial2.ipynb +++ b/tutorials/W3D3_OptimalControl/student/W3D3_Tutorial2.ipynb @@ -386,7 +386,7 @@ "\n", "In *open-loop control*, $a_t$ is not a function of $s_t$. In *closed-loop linear control*, $a_t$ is a linear function of the state $s_t$. 
Specifically, $a_t$ is the control gain, $L_t$, multiplied by $s_t$, i.e., $a_t=L_t s_t$.\n", "\n", - "In the next excercise, you will explore what happens when nothing is controlling the system, when the system is being controlled following an open-loop control policy, and when the system is under closed-loop linear control." + "In the next exercise, you will explore what happens when nothing is controlling the system, when the system is being controlled following an open-loop control policy, and when the system is under closed-loop linear control." ] }, { @@ -1276,12 +1276,12 @@ "\n", " def dynamics_tracking(self, D, B, L):\n", "\n", - " s = np.zeros(self.T) # states intialization\n", + " s = np.zeros(self.T) # states initialization\n", " s[0] = self.ini_state\n", "\n", " noise = np.sqrt(self.noise_var) * standard_normal_noise\n", "\n", - " a = np.zeros(self.T) # control intialization\n", + " a = np.zeros(self.T) # control initialization\n", " a_bar = np.zeros(self.T)\n", " for t in range(self.T - 1):\n", " a_bar[t] = ( - D * s[t] + self.goal[t + 1]) / B\n", @@ -1695,7 +1695,7 @@ "\n", "1. Visualize the system dynamics $s_t$ in closed-loop control with an arbitrary constant control gain. Vary this control gain.\n", "\n", - "2. Play arround with the remaining sliders. What happens when the process noise is high (low)? How about the measurement noise?\n" + "2. Play around with the remaining sliders. What happens when the process noise is high (low)? How about the measurement noise?\n" ] }, { diff --git a/tutorials/W3D4_ReinforcementLearning/W3D4_Tutorial1.ipynb b/tutorials/W3D4_ReinforcementLearning/W3D4_Tutorial1.ipynb index 0741113c37..89ab1d2c23 100644 --- a/tutorials/W3D4_ReinforcementLearning/W3D4_Tutorial1.ipynb +++ b/tutorials/W3D4_ReinforcementLearning/W3D4_Tutorial1.ipynb @@ -744,7 +744,7 @@ "\n", "Before enabling the interactive demo below, take a moment to think about the functions of these two parameters. 
$\\alpha$ controls the size of the Value function updates produced by each TD-error. In our simple, deterministic world, will this affect the final model we learn? Is a larger $\\alpha$ necessarily better in more complex, realistic environments?\n", "\n", - "The discount rate $\\gamma$ applies an exponentially-decaying weight to returns occuring in the future, rather than the present timestep. How does this affect the model we learn? What happens when $\\gamma=0$ or $\\gamma \\geq 1$?\n", + "The discount rate $\\gamma$ applies an exponentially-decaying weight to returns occurring in the future, rather than the present timestep. How does this affect the model we learn? What happens when $\\gamma=0$ or $\\gamma \\geq 1$?\n", "\n", "Use the widget to test your hypotheses.\n", "\n", diff --git a/tutorials/W3D4_ReinforcementLearning/W3D4_Tutorial3.ipynb b/tutorials/W3D4_ReinforcementLearning/W3D4_Tutorial3.ipynb index 940574add0..5b6c438255 100644 --- a/tutorials/W3D4_ReinforcementLearning/W3D4_Tutorial3.ipynb +++ b/tutorials/W3D4_ReinforcementLearning/W3D4_Tutorial3.ipynb @@ -415,7 +415,7 @@ "# @markdown Execute to get helper functions `epsilon_greedy`, `CliffWorld`, and `learn_environment`\n", "\n", "def epsilon_greedy(q, epsilon):\n", - " \"\"\"Epsilon-greedy policy: selects the maximum value action with probabilty\n", + " \"\"\"Epsilon-greedy policy: selects the maximum value action with probability\n", " (1-epsilon) and selects randomly with epsilon probability.\n", "\n", " Args:\n", diff --git a/tutorials/W3D4_ReinforcementLearning/W3D4_Tutorial4.ipynb b/tutorials/W3D4_ReinforcementLearning/W3D4_Tutorial4.ipynb index b7281dc80b..995c69dc93 100644 --- a/tutorials/W3D4_ReinforcementLearning/W3D4_Tutorial4.ipynb +++ b/tutorials/W3D4_ReinforcementLearning/W3D4_Tutorial4.ipynb @@ -257,7 +257,7 @@ "source": [ "#@title Helper Functions\n", "def epsilon_greedy(q, epsilon):\n", - " \"\"\"Epsilon-greedy policy: selects the maximum value action with probabilty\n", + " 
\"\"\"Epsilon-greedy policy: selects the maximum value action with probability\n", " (1-epsilon) and selects randomly with epsilon probability.\n", "\n", " Args:\n", diff --git a/tutorials/W3D4_ReinforcementLearning/instructor/W3D4_Tutorial1.ipynb b/tutorials/W3D4_ReinforcementLearning/instructor/W3D4_Tutorial1.ipynb index b493fe2a7e..6071aa65aa 100644 --- a/tutorials/W3D4_ReinforcementLearning/instructor/W3D4_Tutorial1.ipynb +++ b/tutorials/W3D4_ReinforcementLearning/instructor/W3D4_Tutorial1.ipynb @@ -746,7 +746,7 @@ "\n", "Before enabling the interactive demo below, take a moment to think about the functions of these two parameters. $\\alpha$ controls the size of the Value function updates produced by each TD-error. In our simple, deterministic world, will this affect the final model we learn? Is a larger $\\alpha$ necessarily better in more complex, realistic environments?\n", "\n", - "The discount rate $\\gamma$ applies an exponentially-decaying weight to returns occuring in the future, rather than the present timestep. How does this affect the model we learn? What happens when $\\gamma=0$ or $\\gamma \\geq 1$?\n", + "The discount rate $\\gamma$ applies an exponentially-decaying weight to returns occurring in the future, rather than the present timestep. How does this affect the model we learn? 
What happens when $\\gamma=0$ or $\\gamma \\geq 1$?\n", "\n", "Use the widget to test your hypotheses.\n", "\n", diff --git a/tutorials/W3D4_ReinforcementLearning/instructor/W3D4_Tutorial3.ipynb b/tutorials/W3D4_ReinforcementLearning/instructor/W3D4_Tutorial3.ipynb index f08dd5abcf..0dc7a9b157 100644 --- a/tutorials/W3D4_ReinforcementLearning/instructor/W3D4_Tutorial3.ipynb +++ b/tutorials/W3D4_ReinforcementLearning/instructor/W3D4_Tutorial3.ipynb @@ -415,7 +415,7 @@ "# @markdown Execute to get helper functions `epsilon_greedy`, `CliffWorld`, and `learn_environment`\n", "\n", "def epsilon_greedy(q, epsilon):\n", - " \"\"\"Epsilon-greedy policy: selects the maximum value action with probabilty\n", + " \"\"\"Epsilon-greedy policy: selects the maximum value action with probability\n", " (1-epsilon) and selects randomly with epsilon probability.\n", "\n", " Args:\n", diff --git a/tutorials/W3D4_ReinforcementLearning/instructor/W3D4_Tutorial4.ipynb b/tutorials/W3D4_ReinforcementLearning/instructor/W3D4_Tutorial4.ipynb index 47bf012dc8..7b5bdf4233 100644 --- a/tutorials/W3D4_ReinforcementLearning/instructor/W3D4_Tutorial4.ipynb +++ b/tutorials/W3D4_ReinforcementLearning/instructor/W3D4_Tutorial4.ipynb @@ -257,7 +257,7 @@ "source": [ "#@title Helper Functions\n", "def epsilon_greedy(q, epsilon):\n", - " \"\"\"Epsilon-greedy policy: selects the maximum value action with probabilty\n", + " \"\"\"Epsilon-greedy policy: selects the maximum value action with probability\n", " (1-epsilon) and selects randomly with epsilon probability.\n", "\n", " Args:\n", diff --git a/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial1.ipynb b/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial1.ipynb index 38cdd2453c..6b5344f62e 100644 --- a/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial1.ipynb +++ b/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial1.ipynb @@ -701,7 +701,7 @@ "\n", "Before enabling the interactive demo below, take a moment to think about 
the functions of these two parameters. $\\alpha$ controls the size of the Value function updates produced by each TD-error. In our simple, deterministic world, will this affect the final model we learn? Is a larger $\\alpha$ necessarily better in more complex, realistic environments?\n", "\n", - "The discount rate $\\gamma$ applies an exponentially-decaying weight to returns occuring in the future, rather than the present timestep. How does this affect the model we learn? What happens when $\\gamma=0$ or $\\gamma \\geq 1$?\n", + "The discount rate $\\gamma$ applies an exponentially-decaying weight to returns occurring in the future, rather than the present timestep. How does this affect the model we learn? What happens when $\\gamma=0$ or $\\gamma \\geq 1$?\n", "\n", "Use the widget to test your hypotheses.\n", "\n", diff --git a/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial3.ipynb b/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial3.ipynb index 6208dcc117..6e181d96c0 100644 --- a/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial3.ipynb +++ b/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial3.ipynb @@ -415,7 +415,7 @@ "# @markdown Execute to get helper functions `epsilon_greedy`, `CliffWorld`, and `learn_environment`\n", "\n", "def epsilon_greedy(q, epsilon):\n", - " \"\"\"Epsilon-greedy policy: selects the maximum value action with probabilty\n", + " \"\"\"Epsilon-greedy policy: selects the maximum value action with probability\n", " (1-epsilon) and selects randomly with epsilon probability.\n", "\n", " Args:\n", diff --git a/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial4.ipynb b/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial4.ipynb index 86d27d8d73..04a75eb2b0 100644 --- a/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial4.ipynb +++ b/tutorials/W3D4_ReinforcementLearning/student/W3D4_Tutorial4.ipynb @@ -257,7 +257,7 @@ "source": [ "#@title Helper Functions\n", "def epsilon_greedy(q, 
epsilon):\n", - " \"\"\"Epsilon-greedy policy: selects the maximum value action with probabilty\n", + " \"\"\"Epsilon-greedy policy: selects the maximum value action with probability\n", " (1-epsilon) and selects randomly with epsilon probability.\n", "\n", " Args:\n", diff --git a/tutorials/W3D5_NetworkCausality/W3D5_Tutorial3.ipynb b/tutorials/W3D5_NetworkCausality/W3D5_Tutorial3.ipynb index ede4b46981..23dcc64b66 100644 --- a/tutorials/W3D5_NetworkCausality/W3D5_Tutorial3.ipynb +++ b/tutorials/W3D5_NetworkCausality/W3D5_Tutorial3.ipynb @@ -985,7 +985,7 @@ " n_neurons (int): number of neurons\n", " A (np.ndarray): connectivity matrix\n", " X (np.ndarray): dynamical system\n", - " observed_ratio (float): the proportion of n_neurons observed, must be betweem 0 and 1.\n", + " observed_ratio (float): the proportion of n_neurons observed, must be between 0 and 1.\n", " regression_args (dict): dictionary of lasso regression arguments and hyperparameters\n", "\n", " Returns:\n", diff --git a/tutorials/W3D5_NetworkCausality/W3D5_Tutorial4.ipynb b/tutorials/W3D5_NetworkCausality/W3D5_Tutorial4.ipynb index 859528368e..44146b4c7f 100644 --- a/tutorials/W3D5_NetworkCausality/W3D5_Tutorial4.ipynb +++ b/tutorials/W3D5_NetworkCausality/W3D5_Tutorial4.ipynb @@ -432,7 +432,7 @@ " n_neurons (int): the number of neurons in our system.\n", " timesteps (int): the number of timesteps to simulate our system.\n", " random_state (int): seed for reproducibility\n", - " observed_ratio (float): the proportion of n_neurons observed, must be betweem 0 and 1.\n", + " observed_ratio (float): the proportion of n_neurons observed, must be between 0 and 1.\n", " regression_args (dict): dictionary of lasso regression arguments and hyperparameters\n", " neuron_idx (int): optionally provide a neuron idx to compute connectivity for\n", "\n", @@ -496,7 +496,7 @@ " n_neurons (int): number of neurons\n", " A (np.ndarray): connectivity matrix\n", " X (np.ndarray): dynamical system\n", - " observed_ratio 
(float): the proportion of n_neurons observed, must be betweem 0 and 1.\n", + " observed_ratio (float): the proportion of n_neurons observed, must be between 0 and 1.\n", " regression_args (dict): dictionary of lasso regression arguments and hyperparameters\n", "\n", " Returns:\n", diff --git a/tutorials/W3D5_NetworkCausality/instructor/W3D5_Tutorial3.ipynb b/tutorials/W3D5_NetworkCausality/instructor/W3D5_Tutorial3.ipynb index 1c87f43adf..46221df4c1 100644 --- a/tutorials/W3D5_NetworkCausality/instructor/W3D5_Tutorial3.ipynb +++ b/tutorials/W3D5_NetworkCausality/instructor/W3D5_Tutorial3.ipynb @@ -987,7 +987,7 @@ " n_neurons (int): number of neurons\n", " A (np.ndarray): connectivity matrix\n", " X (np.ndarray): dynamical system\n", - " observed_ratio (float): the proportion of n_neurons observed, must be betweem 0 and 1.\n", + " observed_ratio (float): the proportion of n_neurons observed, must be between 0 and 1.\n", " regression_args (dict): dictionary of lasso regression arguments and hyperparameters\n", "\n", " Returns:\n", diff --git a/tutorials/W3D5_NetworkCausality/instructor/W3D5_Tutorial4.ipynb b/tutorials/W3D5_NetworkCausality/instructor/W3D5_Tutorial4.ipynb index 7ec846585f..aee57654c8 100644 --- a/tutorials/W3D5_NetworkCausality/instructor/W3D5_Tutorial4.ipynb +++ b/tutorials/W3D5_NetworkCausality/instructor/W3D5_Tutorial4.ipynb @@ -432,7 +432,7 @@ " n_neurons (int): the number of neurons in our system.\n", " timesteps (int): the number of timesteps to simulate our system.\n", " random_state (int): seed for reproducibility\n", - " observed_ratio (float): the proportion of n_neurons observed, must be betweem 0 and 1.\n", + " observed_ratio (float): the proportion of n_neurons observed, must be between 0 and 1.\n", " regression_args (dict): dictionary of lasso regression arguments and hyperparameters\n", " neuron_idx (int): optionally provide a neuron idx to compute connectivity for\n", "\n", @@ -496,7 +496,7 @@ " n_neurons (int): number of neurons\n", 
" A (np.ndarray): connectivity matrix\n", " X (np.ndarray): dynamical system\n", - " observed_ratio (float): the proportion of n_neurons observed, must be betweem 0 and 1.\n", + " observed_ratio (float): the proportion of n_neurons observed, must be between 0 and 1.\n", " regression_args (dict): dictionary of lasso regression arguments and hyperparameters\n", "\n", " Returns:\n", diff --git a/tutorials/W3D5_NetworkCausality/student/W3D5_Tutorial3.ipynb b/tutorials/W3D5_NetworkCausality/student/W3D5_Tutorial3.ipynb index a17d559672..43a2e66220 100644 --- a/tutorials/W3D5_NetworkCausality/student/W3D5_Tutorial3.ipynb +++ b/tutorials/W3D5_NetworkCausality/student/W3D5_Tutorial3.ipynb @@ -950,7 +950,7 @@ " n_neurons (int): number of neurons\n", " A (np.ndarray): connectivity matrix\n", " X (np.ndarray): dynamical system\n", - " observed_ratio (float): the proportion of n_neurons observed, must be betweem 0 and 1.\n", + " observed_ratio (float): the proportion of n_neurons observed, must be between 0 and 1.\n", " regression_args (dict): dictionary of lasso regression arguments and hyperparameters\n", "\n", " Returns:\n", diff --git a/tutorials/W3D5_NetworkCausality/student/W3D5_Tutorial4.ipynb b/tutorials/W3D5_NetworkCausality/student/W3D5_Tutorial4.ipynb index 71589b52cf..1d63152f78 100644 --- a/tutorials/W3D5_NetworkCausality/student/W3D5_Tutorial4.ipynb +++ b/tutorials/W3D5_NetworkCausality/student/W3D5_Tutorial4.ipynb @@ -432,7 +432,7 @@ " n_neurons (int): the number of neurons in our system.\n", " timesteps (int): the number of timesteps to simulate our system.\n", " random_state (int): seed for reproducibility\n", - " observed_ratio (float): the proportion of n_neurons observed, must be betweem 0 and 1.\n", + " observed_ratio (float): the proportion of n_neurons observed, must be between 0 and 1.\n", " regression_args (dict): dictionary of lasso regression arguments and hyperparameters\n", " neuron_idx (int): optionally provide a neuron idx to compute connectivity 
for\n", "\n", @@ -496,7 +496,7 @@ " n_neurons (int): number of neurons\n", " A (np.ndarray): connectivity matrix\n", " X (np.ndarray): dynamical system\n", - " observed_ratio (float): the proportion of n_neurons observed, must be betweem 0 and 1.\n", + " observed_ratio (float): the proportion of n_neurons observed, must be between 0 and 1.\n", " regression_args (dict): dictionary of lasso regression arguments and hyperparameters\n", "\n", " Returns:\n",