1
1
import ibis
2
2
import pytest
3
- from sklearn .metrics import accuracy_score as sk_accuracy_score
4
- from sklearn .metrics import f1_score as sk_f1_score
5
- from sklearn .metrics import precision_score as sk_precision_score
6
- from sklearn .metrics import recall_score as sk_recall_score
3
+ import sklearn .metrics
7
4
8
- from ibis_ml .metrics import accuracy_score , f1_score , precision_score , recall_score
5
+ import ibis_ml .metrics
9
6
10
7
11
8
@pytest .fixture
@@ -19,33 +16,20 @@ def results_table():
19
16
)
20
17
21
18
22
@pytest.mark.parametrize(
    "metric_name",
    [
        pytest.param("accuracy_score", id="accuracy_score"),
        pytest.param("precision_score", id="precision_score"),
        pytest.param("recall_score", id="recall_score"),
        pytest.param("f1_score", id="f1_score"),
    ],
)
def test_classification_metrics(results_table, metric_name):
    """Each ibis-ml classification metric agrees with its sklearn counterpart.

    The ibis-ml metric is evaluated lazily on the ibis table and materialized
    via pyarrow; the sklearn reference is computed on the same data pulled
    into pandas. The two results must match to within 1e-4.
    """
    # Resolve both implementations by name so one test body covers all metrics.
    metric = getattr(ibis_ml.metrics, metric_name)
    reference = getattr(sklearn.metrics, metric_name)
    table = results_table
    frame = table.to_pandas()
    # Materialize the ibis scalar expression to a plain Python number.
    got = metric(table.actual, table.prediction).to_pyarrow().as_py()
    want = reference(frame["actual"], frame["prediction"])
    assert got == pytest.approx(want, abs=1e-4)
0 commit comments