
Commit 2cc8519

More detailed Norman 2019 preprocessing notebook (#3)
* Add Norman 2019 notebook with more details

  This PR adds a notebook to download and preprocess the Norman 2019 dataset, starting directly from the raw counts. The notebook currently downloads the data and fills in various metadata values. I made this PR because the current Norman 2019 notebook depends on downloading another h5ad file first; I personally like being able to see the full workflow (i.e., going from the author-provided files to the final AnnData) as part of the notebooks. As mentioned in #2, I'm not sure which QC steps you prefer, so this notebook simply produces an AnnData with raw counts.

* Add standard metadata fields

* Standardize naming

Authored-by: Ethan Weinberger <[email protected]>
1 parent 040772c commit 2cc8519

File tree

2 files changed: +369 -0 lines changed


datasets/Norman_2019_curation.ipynb

Lines changed: 339 additions & 0 deletions
@@ -0,0 +1,339 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "d12cb9dc",
   "metadata": {},
   "source": [
    "Accession: https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE133344"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "ca04f335-6926-4764-82ec-374d7c6f94b4",
   "metadata": {},
   "outputs": [],
   "source": [
    "import gzip\n",
    "import os\n",
    "import re\n",
    "\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from anndata import AnnData\n",
    "from scipy.io import mmread\n",
    "from scipy.sparse import coo_matrix\n",
    "\n",
    "from utils import download_binary_file\n",
    "\n",
    "# Gene program lists obtained by cross-referencing the heatmap here\n",
    "# https://github.com/thomasmaxwellnorman/Perturbseq_GI/blob/master/GI_optimal_umap.ipynb\n",
    "# with Figure 2b in Norman 2019\n",
    "G1_CYCLE = [\n",
    "    \"CDKN1C+CDKN1B\",\n",
    "    \"CDKN1B+ctrl\",\n",
    "    \"CDKN1B+CDKN1A\",\n",
    "    \"CDKN1C+ctrl\",\n",
    "    \"ctrl+CDKN1A\",\n",
    "    \"CDKN1C+CDKN1A\",\n",
    "    \"CDKN1A+ctrl\",\n",
    "]\n",
    "\n",
    "ERYTHROID = [\n",
    "    \"BPGM+SAMD1\",\n",
    "    \"ATL1+ctrl\",\n",
    "    \"UBASH3B+ZBTB25\",\n",
    "    \"PTPN12+PTPN9\",\n",
    "    \"PTPN12+UBASH3A\",\n",
    "    \"CBL+CNN1\",\n",
    "    \"UBASH3B+CNN1\",\n",
    "    \"CBL+UBASH3B\",\n",
    "    \"UBASH3B+PTPN9\",\n",
    "    \"PTPN1+ctrl\",\n",
    "    \"CBL+PTPN9\",\n",
    "    \"CNN1+UBASH3A\",\n",
    "    \"CBL+PTPN12\",\n",
    "    \"PTPN12+ZBTB25\",\n",
    "    \"UBASH3B+PTPN12\",\n",
    "    \"SAMD1+PTPN12\",\n",
    "    \"SAMD1+UBASH3B\",\n",
    "    \"UBASH3B+UBASH3A\",\n",
    "]\n",
    "\n",
    "PIONEER_FACTORS = [\n",
    "    \"ZBTB10+SNAI1\",\n",
    "    \"FOXL2+MEIS1\",\n",
    "    \"POU3F2+CBFA2T3\",\n",
    "    \"DUSP9+SNAI1\",\n",
    "    \"FOXA3+FOXA1\",\n",
    "    \"FOXA3+ctrl\",\n",
    "    \"LYL1+IER5L\",\n",
    "    \"FOXA1+FOXF1\",\n",
    "    \"FOXF1+HOXB9\",\n",
    "    \"FOXA1+HOXB9\",\n",
    "    \"FOXA3+HOXB9\",\n",
    "    \"FOXA3+FOXA1\",\n",
    "    \"FOXA3+FOXL2\",\n",
    "    \"POU3F2+FOXL2\",\n",
    "    \"FOXF1+FOXL2\",\n",
    "    \"FOXA1+FOXL2\",\n",
    "    \"HOXA13+ctrl\",\n",
    "    \"ctrl+HOXC13\",\n",
    "    \"HOXC13+ctrl\",\n",
    "    \"MIDN+ctrl\",\n",
    "    \"TP73+ctrl\",\n",
    "]\n",
    "\n",
    "GRANULOCYTE_APOPTOSIS = [\n",
    "    \"SPI1+ctrl\",\n",
    "    \"ctrl+SPI1\",\n",
    "    \"ctrl+CEBPB\",\n",
    "    \"CEBPB+ctrl\",\n",
    "    \"JUN+CEBPA\",\n",
    "    \"CEBPB+CEBPA\",\n",
    "    \"FOSB+CEBPE\",\n",
    "    \"ZC3HAV1+CEBPA\",\n",
    "    \"KLF1+CEBPA\",\n",
    "    \"ctrl+CEBPA\",\n",
    "    \"CEBPA+ctrl\",\n",
    "    \"CEBPE+CEBPA\",\n",
    "    \"CEBPE+SPI1\",\n",
    "    \"CEBPE+ctrl\",\n",
    "    \"ctrl+CEBPE\",\n",
    "    \"CEBPE+RUNX1T1\",\n",
    "    \"CEBPE+CEBPB\",\n",
    "    \"FOSB+CEBPB\",\n",
    "    \"ETS2+CEBPE\",\n",
    "]\n",
    "\n",
    "MEGAKARYOCYTE = [\n",
    "    \"ctrl+ETS2\",\n",
    "    \"MAPK1+ctrl\",\n",
    "    \"ctrl+MAPK1\",\n",
    "    \"ETS2+MAPK1\",\n",
    "    \"CEBPB+MAPK1\",\n",
    "    \"MAPK1+TGFBR2\",\n",
    "]\n",
    "\n",
    "PRO_GROWTH = [\n",
    "    \"CEBPE+KLF1\",\n",
    "    \"KLF1+MAP2K6\",\n",
    "    \"AHR+KLF1\",\n",
    "    \"ctrl+KLF1\",\n",
    "    \"KLF1+ctrl\",\n",
    "    \"KLF1+BAK1\",\n",
    "    \"KLF1+TGFBR2\",\n",
    "]\n",
    "\n",
    "\n",
    "def download_norman_2019(output_path: str) -> None:\n",
    "    \"\"\"\n",
    "    Download Norman et al. 2019 data and metadata files from the hosting URLs.\n",
    "\n",
    "    Args:\n",
    "    ----\n",
    "        output_path: Output path to store the downloaded and unzipped\n",
    "            directories.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    None. File directories are downloaded to output_path.\n",
    "    \"\"\"\n",
    "\n",
    "    file_urls = (\n",
    "        \"https://ftp.ncbi.nlm.nih.gov/geo/series/GSE133nnn/GSE133344/suppl\"\n",
    "        \"/GSE133344_filtered_matrix.mtx.gz\",\n",
    "        \"https://ftp.ncbi.nlm.nih.gov/geo/series/GSE133nnn/GSE133344/suppl\"\n",
    "        \"/GSE133344_filtered_genes.tsv.gz\",\n",
    "        \"https://ftp.ncbi.nlm.nih.gov/geo/series/GSE133nnn/GSE133344/suppl\"\n",
    "        \"/GSE133344_filtered_barcodes.tsv.gz\",\n",
    "        \"https://ftp.ncbi.nlm.nih.gov/geo/series/GSE133nnn/GSE133344/suppl\"\n",
    "        \"/GSE133344_filtered_cell_identities.csv.gz\",\n",
    "    )\n",
    "\n",
    "    for url in file_urls:\n",
    "        output_filename = os.path.join(output_path, url.split(\"/\")[-1])\n",
    "        download_binary_file(url, output_filename)\n",
    "\n",
    "\n",
    "def read_norman_2019(file_directory: str) -> coo_matrix:\n",
    "    \"\"\"\n",
    "    Read the expression data for Norman et al. 2019 in the given directory.\n",
    "\n",
    "    Args:\n",
    "    ----\n",
    "        file_directory: Directory containing Norman et al. 2019 data.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    A sparse matrix containing single-cell gene expression count, with rows\n",
    "    representing genes and columns representing cells.\n",
    "    \"\"\"\n",
    "\n",
    "    with gzip.open(\n",
    "        os.path.join(file_directory, \"GSE133344_filtered_matrix.mtx.gz\"), \"rb\"\n",
    "    ) as f:\n",
    "        matrix = mmread(f)\n",
    "\n",
    "    return matrix"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "21457d17-ce85-405e-af71-b98f55cd9dfc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloaded data from https://ftp.ncbi.nlm.nih.gov/geo/series/GSE133nnn/GSE133344/suppl/GSE133344_filtered_matrix.mtx.gz at ./GSE133344_filtered_matrix.mtx.gz\n",
      "Downloaded data from https://ftp.ncbi.nlm.nih.gov/geo/series/GSE133nnn/GSE133344/suppl/GSE133344_filtered_genes.tsv.gz at ./GSE133344_filtered_genes.tsv.gz\n",
      "Downloaded data from https://ftp.ncbi.nlm.nih.gov/geo/series/GSE133nnn/GSE133344/suppl/GSE133344_filtered_barcodes.tsv.gz at ./GSE133344_filtered_barcodes.tsv.gz\n",
      "Downloaded data from https://ftp.ncbi.nlm.nih.gov/geo/series/GSE133nnn/GSE133344/suppl/GSE133344_filtered_cell_identities.csv.gz at ./GSE133344_filtered_cell_identities.csv.gz\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Trying to set attribute `.obs` of view, copying.\n"
     ]
    }
   ],
   "source": [
    "download_path = \"./norman2019/\"\n",
    "\n",
    "download_norman_2019(download_path)\n",
    "\n",
    "matrix = read_norman_2019(download_path)\n",
    "\n",
    "# List of cell barcodes. The barcodes in this list are stored in the same order\n",
    "# as cells are in the count matrix.\n",
    "cell_barcodes = pd.read_csv(\n",
    "    os.path.join(download_path, \"GSE133344_filtered_barcodes.tsv.gz\"),\n",
    "    sep=\"\\t\",\n",
    "    header=None,\n",
    "    names=[\"cell_barcode\"],\n",
    ")\n",
    "\n",
    "# IDs/names of the gene features.\n",
    "gene_list = pd.read_csv(\n",
    "    os.path.join(download_path, \"GSE133344_filtered_genes.tsv.gz\"),\n",
    "    sep=\"\\t\",\n",
    "    header=None,\n",
    "    names=[\"gene_id\", \"gene_name\"],\n",
    ")\n",
    "\n",
    "# Dataframe where each row corresponds to a cell, and each column corresponds\n",
    "# to a gene feature.\n",
    "matrix = pd.DataFrame(\n",
    "    matrix.transpose().todense(),\n",
    "    columns=gene_list[\"gene_id\"],\n",
    "    index=cell_barcodes[\"cell_barcode\"],\n",
    "    dtype=\"int32\",\n",
    ")\n",
    "\n",
    "# Dataframe mapping cell barcodes to metadata about that cell (e.g. which CRISPR\n",
    "# guides were applied to that cell). Unfortunately, this list has a different\n",
    "# ordering from the count matrix, so we have to be careful combining the metadata\n",
    "# and count data.\n",
    "cell_identities = pd.read_csv(\n",
    "    os.path.join(download_path, \"GSE133344_filtered_cell_identities.csv.gz\")\n",
    ").set_index(\"cell_barcode\")\n",
    "\n",
    "# This merge call reorders our metadata dataframe to match the ordering in the\n",
    "# count matrix. Some cells in `cell_barcodes` do not have metadata associated with\n",
    "# them, and their metadata values will be filled in as NaN.\n",
    "aligned_metadata = pd.merge(\n",
    "    cell_barcodes,\n",
    "    cell_identities,\n",
    "    left_on=\"cell_barcode\",\n",
    "    right_index=True,\n",
    "    how=\"left\",\n",
    ").set_index(\"cell_barcode\")\n",
    "\n",
    "adata = AnnData(matrix)\n",
    "adata.obs = aligned_metadata\n",
    "\n",
    "# Filter out any cells that don't have metadata values.\n",
    "rows_without_nans = [\n",
    "    index for index, row in adata.obs.iterrows() if not row.isnull().any()\n",
    "]\n",
    "adata = adata[rows_without_nans, :]\n",
    "\n",
    "# Remove these as suggested by the authors. See lines referring to\n",
    "# NegCtrl1_NegCtrl0 in GI_generate_populations.ipynb in the Norman 2019 paper's\n",
    "# Github repo https://github.com/thomasmaxwellnorman/Perturbseq_GI/\n",
    "adata = adata[adata.obs[\"guide_identity\"] != \"NegCtrl1_NegCtrl0__NegCtrl1_NegCtrl0\"]\n",
    "\n",
    "# We create a new metadata column with cleaner representations of CRISPR guide\n",
    "# identities. The original format is <Guide1>_<Guide2>__<Guide1>_<Guide2>_<number>\n",
    "adata.obs[\"guide_merged\"] = adata.obs[\"guide_identity\"]\n",
    "\n",
    "control_regex = re.compile(r\"NegCtrl(.*)_NegCtrl(.*)+NegCtrl(.*)_NegCtrl(.*)\")\n",
    "for i in adata.obs[\"guide_merged\"].unique():\n",
    "    if control_regex.match(i):\n",
    "        # For any cells that only had control guides, we don't care about the\n",
    "        # specific IDs of the guides. Here we relabel them just as \"ctrl\".\n",
    "        adata.obs[\"guide_merged\"].replace(i, \"ctrl\", inplace=True)\n",
    "    else:\n",
    "        # Otherwise, we reformat the guide label to be <Guide1>+<Guide2>. If Guide1\n",
    "        # or Guide2 was a control, we replace it with \"ctrl\".\n",
    "        split = i.split(\"__\")[0]\n",
    "        split = split.split(\"_\")\n",
    "        for j, string in enumerate(split):\n",
    "            if \"NegCtrl\" in split[j]:\n",
    "                split[j] = \"ctrl\"\n",
    "        adata.obs[\"guide_merged\"].replace(i, f\"{split[0]}+{split[1]}\", inplace=True)\n",
    "\n",
    "guides_to_programs = {}\n",
    "guides_to_programs.update(dict.fromkeys(G1_CYCLE, \"G1 cell cycle arrest\"))\n",
    "guides_to_programs.update(dict.fromkeys(ERYTHROID, \"Erythroid\"))\n",
    "guides_to_programs.update(dict.fromkeys(PIONEER_FACTORS, \"Pioneer factors\"))\n",
    "guides_to_programs.update(\n",
    "    dict.fromkeys(GRANULOCYTE_APOPTOSIS, \"Granulocyte/apoptosis\")\n",
    ")\n",
    "guides_to_programs.update(dict.fromkeys(PRO_GROWTH, \"Pro-growth\"))\n",
    "guides_to_programs.update(dict.fromkeys(MEGAKARYOCYTE, \"Megakaryocyte\"))\n",
    "guides_to_programs.update(dict.fromkeys([\"ctrl\"], \"Ctrl\"))\n",
    "\n",
    "adata.obs[\"gene_program\"] = [guides_to_programs[x] if x in guides_to_programs else \"N/A\" for x in adata.obs[\"guide_merged\"]]\n",
    "adata.obs[\"good_coverage\"] = adata.obs[\"good_coverage\"].astype(bool)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "72c5c54f",
   "metadata": {},
   "outputs": [],
   "source": [
    "adata.write('Norman_2019_raw.h5ad')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
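
For reference (not part of the commit): once the notebook above has been run, the Norman_2019_raw.h5ad file it writes can be sanity-checked with a few lines of Python. This is a minimal sketch; it assumes the file sits in the working directory and that the anndata package used by the notebook is installed.

import anndata as ad

# Load the curated AnnData written by the last notebook cell.
adata = ad.read_h5ad("Norman_2019_raw.h5ad")

# Raw counts: cells as rows, gene IDs as columns.
print(adata.shape)

# Metadata columns filled in by the notebook.
print(adata.obs["guide_merged"].value_counts().head())
print(adata.obs["gene_program"].value_counts())
print(adata.obs["good_coverage"].dtype)  # expected: bool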

datasets/utils.py

Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
import requests
import os


def download_binary_file(
    file_url: str, output_path: str, overwrite: bool = False
) -> None:
    """
    Download binary data file from a URL.

    Args:
    ----
        file_url: URL where the file is hosted.
        output_path: Output path for the downloaded file.
        overwrite: Whether to overwrite existing downloaded file.

    Returns
    -------
    None.
    """
    file_exists = os.path.exists(output_path)
    if (not file_exists) or (file_exists and overwrite):
        request = requests.get(file_url)
        with open(output_path, "wb") as f:
            f.write(request.content)
        print(f"Downloaded data from {file_url} at {output_path}")
    else:
        print(
            f"File {output_path} already exists. "
            "No files downloaded to overwrite the existing file."
        )
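
A minimal usage sketch for this helper (illustrative, not part of the commit): it assumes utils.py is importable from the working directory and reuses one of the GEO URLs and the download path from the notebook above.

import os

from utils import download_binary_file

output_dir = "./norman2019/"
os.makedirs(output_dir, exist_ok=True)

url = (
    "https://ftp.ncbi.nlm.nih.gov/geo/series/GSE133nnn/GSE133344/suppl"
    "/GSE133344_filtered_barcodes.tsv.gz"
)
output_path = os.path.join(output_dir, url.split("/")[-1])

# First call downloads the file and reports where it was written.
download_binary_file(url, output_path)

# A second call without overwrite=True keeps the existing file and says so.
download_binary_file(url, output_path)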
