Skip to content

Commit efa09e4

Browse files
committed
fix linting and formula
1 parent ce4fec5 commit efa09e4

File tree

2 files changed

+11
-14
lines changed

2 files changed

+11
-14
lines changed

econml/score/drscorer.py

+7-9
Original file line numberDiff line numberDiff line change
@@ -10,14 +10,13 @@
1010

1111

1212
class DRScorer:
13-
""" Scorer based on the DRLearner loss. Fits regression model g (using T-Learner) and propensity model p at fit time
14-
and calculates the regression and propensity of the evaluation data::
13+
""" Scorer based on the DRLearner loss. Fits regression model g (using T-Learner) and propensity model p at fit
14+
time and calculates the regression and propensity of the evaluation data::
1515
1616
g (model_regression) = E[Y | X, W, T]
17-
1817
p (model_propensity) = Pr[T | X, W]
1918
20-
Ydr(g,p) = g + (Y - g ) / p * T
19+
Ydr(g,p) = g(X,W,T) + (Y - g(X,W,T)) / p_T(X,W)
2120
2221
Then for any given cate model calculates the loss::
2322
@@ -206,17 +205,16 @@ def score(self, cate_model):
206205
score : double
207206
An analogue of the DR-square loss for the causal setting.
208207
"""
209-
g, p = self.drlearner_._cached_values.nuisances
210-
Y = self.drlearner_._cached_values.Y
211-
T = self.drlearner_._cached_values.T
212-
Ydr = g + (Y - g) / p * T
208+
Y = self.drlearner_._cached_values.Y
209+
T = self.drlearner_._cached_values.T
210+
Y_pred, _ = self.drlearner_._cached_values.nuisances
211+
Ydr = Y_pred[..., 1:] - Y_pred[..., [0]]
213212
X = self.drlearner_._cached_values.W[:, :self.dx_]
214213
sample_weight = self.drlearner_._cached_values.sample_weight
215214
if Ydr.ndim == 1:
216215
Ydr = Ydr.reshape((-1, 1))
217216

218217
effects = cate_model.const_marginal_effect(X).reshape((-1, Ydr.shape[1]))
219-
220218
if sample_weight is not None:
221219
return 1 - np.mean(np.average((Ydr - effects)**2, weights=sample_weight, axis=0)) / self.base_score_
222220
else:

econml/tests/test_drscorer.py

+4-5
Original file line numberDiff line numberDiff line change
@@ -29,9 +29,8 @@ def _get_data(self):
2929
X = np.random.normal(size=(1000, 3))
3030
T = np.random.binomial(2, scipy.special.expit(X[:, 0]))
3131
sigma = 0.001
32-
y = (1 + .5*X[:, 0]) * T + X[:, 0] + np.random.normal(0, sigma, size=(1000,))
32+
y = (1 + .5 * X[:, 0]) * T + X[:, 0] + np.random.normal(0, sigma, size=(1000,))
3333
return y, T, X, X[:, 0]
34-
3534

3635
def test_comparison(self):
3736
def reg():
@@ -53,7 +52,7 @@ def clf():
5352
('dalearner', DomainAdaptationLearner(models=reg(), final_models=reg(), propensity_model=clf())),
5453
('slearner', SLearner(overall_model=reg())),
5554
('tlearner', TLearner(models=reg())),
56-
('drlearner', DRLearner(model_propensity='auto',model_regression='auto',
55+
('drlearner', DRLearner(model_propensity='auto', model_regression='auto',
5756
model_final=reg(), cv=3)),
5857
('rlearner', NonParamDML(model_y=reg(), model_t=clf(), model_final=reg(),
5958
discrete_treatment=True, cv=3)),
@@ -72,8 +71,8 @@ def clf():
7271
multitask_model_final=False,
7372
featurizer=None,
7473
min_propensity=1e-6,
75-
cv=3,
76-
mc_iters=2,
74+
cv=3,
75+
mc_iters=2,
7776
mc_agg='median')
7877
scorer.fit(Y_val, T_val, X=X_val)
7978
rscore = [scorer.score(mdl) for _, mdl in models]

0 commit comments

Comments
 (0)