Skip to content
This repository was archived by the owner on Jul 15, 2024. It is now read-only.

Commit 19a4424

Browse files
committed
chore: update examples to replace deprecated functions
1 parent 0b4cbc3 commit 19a4424

File tree

4 files changed

+42
-41
lines changed

4 files changed

+42
-41
lines changed

examples/campaign-finance.ipynb

+1-1
Original file line numberDiff line numberDiff line change
@@ -242,7 +242,7 @@
242242
" \"E\": \"recount\",\n",
243243
" }\n",
244244
" first_letter = pgi[0]\n",
245-
" return first_letter.substitute(election_types, else_=ibis.NA)\n",
245+
" return first_letter.substitute(election_types, else_=ibis.null())\n",
246246
"\n",
247247
"\n",
248248
"cleaned = cleaned.mutate(election_type=get_election_type(_.TRANSACTION_PGI)).drop(\n",

examples/imdb.ipynb

+13-13
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@
9393
"cell_type": "markdown",
9494
"metadata": {},
9595
"source": [
96-
"To ensure column names are Pythonic, we can relabel as `snake_case`."
96+
"To ensure column names are Pythonic, we can rename as `snake_case`."
9797
]
9898
},
9999
{
@@ -102,15 +102,15 @@
102102
"metadata": {},
103103
"outputs": [],
104104
"source": [
105-
"name_basics.relabel(\"snake_case\")"
105+
"name_basics.rename(\"snake_case\")"
106106
]
107107
},
108108
{
109109
"attachments": {},
110110
"cell_type": "markdown",
111111
"metadata": {},
112112
"source": [
113-
"Let's grab all of the relevant IMDB tables and relabel columns."
113+
"Let's grab all of the relevant IMDB tables and rename columns."
114114
]
115115
},
116116
{
@@ -119,13 +119,13 @@
119119
"metadata": {},
120120
"outputs": [],
121121
"source": [
122-
"name_basics = ex.imdb_name_basics.fetch().relabel(\"snake_case\")\n",
123-
"title_akas = ex.imdb_title_akas.fetch().relabel(\"snake_case\")\n",
124-
"title_basics = ex.imdb_title_basics.fetch().relabel(\"snake_case\")\n",
125-
"title_crew = ex.imdb_title_crew.fetch().relabel(\"snake_case\")\n",
126-
"title_episode = ex.imdb_title_episode.fetch().relabel(\"snake_case\")\n",
127-
"title_principals = ex.imdb_title_principals.fetch().relabel(\"snake_case\")\n",
128-
"title_ratings = ex.imdb_title_ratings.fetch().relabel(\"snake_case\")"
122+
"name_basics = ex.imdb_name_basics.fetch().rename(\"snake_case\")\n",
123+
"title_akas = ex.imdb_title_akas.fetch().rename(\"snake_case\")\n",
124+
"title_basics = ex.imdb_title_basics.fetch().rename(\"snake_case\")\n",
125+
"title_crew = ex.imdb_title_crew.fetch().rename(\"snake_case\")\n",
126+
"title_episode = ex.imdb_title_episode.fetch().rename(\"snake_case\")\n",
127+
"title_principals = ex.imdb_title_principals.fetch().rename(\"snake_case\")\n",
128+
"title_ratings = ex.imdb_title_ratings.fetch().rename(\"snake_case\")"
129129
]
130130
},
131131
{
@@ -420,7 +420,7 @@
420420
"metadata": {},
421421
"outputs": [],
422422
"source": [
423-
"ibis.show_sql(name_basics)"
423+
"ibis.to_sql(name_basics)"
424424
]
425425
},
426426
{
@@ -437,8 +437,8 @@
437437
"metadata": {},
438438
"outputs": [],
439439
"source": [
440-
"title_akas = title_akas.mutate(title_id=tconst_to_int(_.title_id)).relabel(\n",
441-
" {\"title_id\": \"tconst\"}\n",
440+
"title_akas = title_akas.mutate(title_id=tconst_to_int(_.title_id)).rename(\n",
441+
" {\"tconst\": \"title_id\"}\n",
442442
")\n",
443443
"title_basics = title_basics.mutate(tconst=tconst_to_int(_.tconst))\n",
444444
"title_crew = title_crew.mutate(\n",

requirements.txt

+1-1
Original file line numberDiff line numberDiff line change
@@ -3,5 +3,5 @@ jupyterlab == 3.4.8
33
ipywidgets
44
altair
55
pandas < 2.1
6-
ibis-framework[sqlite,duckdb,clickhouse]
6+
ibis-framework[sqlite,duckdb,clickhouse,examples]
77
ibis-substrait < 3.1

scripts/prepare_campaign_finance_data.py

+27-26
Original file line numberDiff line numberDiff line change
@@ -34,33 +34,34 @@
3434
if not parquet_path.exists():
3535
print("Generating itcont.parquet...")
3636
# Read in the CSV
37-
t = ibis.read_csv(csv_path)
38-
3937
# The CSV doesn't have a header, we need to manually add titles
40-
header = [
41-
"CMTE_ID",
42-
"AMNDT_IND",
43-
"RPT_TP",
44-
"TRANSACTION_PGI",
45-
"IMAGE_NUM",
46-
"TRANSACTION_TP",
47-
"ENTITY_TP",
48-
"NAME",
49-
"CITY",
50-
"STATE",
51-
"ZIP_CODE",
52-
"EMPLOYER",
53-
"OCCUPATION",
54-
"TRANSACTION_DT",
55-
"TRANSACTION_AMT",
56-
"OTHER_ID",
57-
"TRAN_ID",
58-
"FILE_NUM",
59-
"MEMO_CD",
60-
"MEMO_TEXT",
61-
"SUB_ID",
62-
]
63-
t = t.relabel(dict(zip(t.columns, header)))
38+
t = ibis.read_csv(
39+
csv_path,
40+
header=False,
41+
names=[
42+
"CMTE_ID",
43+
"AMNDT_IND",
44+
"RPT_TP",
45+
"TRANSACTION_PGI",
46+
"IMAGE_NUM",
47+
"TRANSACTION_TP",
48+
"ENTITY_TP",
49+
"NAME",
50+
"CITY",
51+
"STATE",
52+
"ZIP_CODE",
53+
"EMPLOYER",
54+
"OCCUPATION",
55+
"TRANSACTION_DT",
56+
"TRANSACTION_AMT",
57+
"OTHER_ID",
58+
"TRAN_ID",
59+
"FILE_NUM",
60+
"MEMO_CD",
61+
"MEMO_TEXT",
62+
"SUB_ID",
63+
],
64+
)
6465

6566
# For the analysis, we're only going to use a few of the columns. To save
6667
# bandwidth, lets select out only the columns we'll be using.

0 commit comments

Comments (0)