@@ -58,7 +58,7 @@ class DoubleMLSSM(LinearScoreMixin, DoubleML):
 
     normalize_ipw : bool
         Indicates whether the inverse probability weights are normalized.
-        Default is ``True``.
+        Default is ``False``.
 
     trimming_rule : str
         A str (``'truncate'`` is the only choice) specifying the trimming approach.
@@ -136,7 +136,7 @@ def __init__(self,
                          apply_cross_fitting)
 
         self._external_predictions_implemented = False
-        self._sensitivity_implemented = True
+        self._sensitivity_implemented = False
         self._normalize_ipw = normalize_ipw
 
         self._trimming_rule = trimming_rule
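
Side note on the flag flip above: in other DoubleML model classes, `_sensitivity_implemented = False` makes the base class refuse `sensitivity_analysis()` with a `NotImplementedError`, so this commit effectively switches sensitivity analysis off for `DoubleMLSSM`. That consequence is inferred from the base class, not stated in the diff.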
@@ -264,15 +264,33 @@ def _nuisance_est(self, smpls, n_jobs_cv, external_predictions, return_models=False):
         pi_hat = copy.deepcopy(g_hat_d1)
         m_hat = copy.deepcopy(g_hat_d1)
 
-        # create strata for splitting
-        strata = self._dml_data.d.reshape(-1, 1) + 2 * self._dml_data.t.reshape(-1, 1)
-
         # pi_hat - used for preliminary estimation of propensity score pi, overwritten in each iteration
         pi_hat_prelim = {'models': None,
                          'targets': [],
                          'preds': []
                          }
 
+        # initialize models
+        fitted_models = {}
+        for learner in self.params_names:
+            # set nuisance model parameters
+            est_params = self._get_params(learner)
+
+            if learner == 'ml_g_d1' or learner == 'ml_g_d0':
+                nuisance = 'ml_g'
+            else:
+                nuisance = learner
+
+            if est_params is not None:
+                fitted_models[learner] = [
+                    clone(self._learner[nuisance]).set_params(**est_params[i_fold]) for i_fold in range(self.n_folds)
+                ]
+            else:
+                fitted_models[learner] = [clone(self._learner[nuisance]) for i_fold in range(self.n_folds)]
+
+        # create strata for splitting
+        strata = self._dml_data.d.reshape(-1, 1) + 2 * self._dml_data.t.reshape(-1, 1)
+
         # calculate nuisance functions over different folds - nested cross-fitting
         for i_fold in range(self.n_folds):
             train_inds = smpls[i_fold][0]
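
For readers outside the codebase: the new `fitted_models` block only matters once fold-wise parameters have been set from the user side, since `_get_params(learner)` otherwise returns `None` and plain clones are used. A minimal usage sketch follows; the `make_ssm_data` helper, the `'d'` treatment column name, and the `set_ml_nuisance_params` signature are assumptions carried over from other DoubleML classes and releases, not part of this commit:

```python
# Sketch of how est_params above gets populated; the data helper and the
# treatment column name 'd' are assumptions, not part of this commit.
import numpy as np
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from doubleml import DoubleMLSSM
from doubleml.datasets import make_ssm_data

np.random.seed(42)
dml_data = make_ssm_data(n_obs=500, dim_x=20, mar=True)

dml_ssm = DoubleMLSSM(dml_data,
                      ml_g=RandomForestRegressor(n_estimators=50),
                      ml_pi=RandomForestClassifier(n_estimators=50),
                      ml_m=RandomForestClassifier(n_estimators=50),
                      normalize_ipw=True)  # opt back in, since the default flipped to False

# a plain dict is broadcast to every fold and repetition, which is exactly
# the est_params[i_fold] lookup the new initialization loop performs
dml_ssm.set_ml_nuisance_params('ml_g_d0', 'd', {'max_depth': 5})
dml_ssm.set_ml_nuisance_params('ml_g_d1', 'd', {'max_depth': 5})
dml_ssm.set_ml_nuisance_params('ml_pi', 'd', {'min_samples_leaf': 10})

dml_ssm.fit()
print(dml_ssm.summary)
```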
@@ -286,12 +304,9 @@ def _nuisance_est(self, smpls, n_jobs_cv, external_predictions, return_models=False):
             s_train_1 = s[train_inds_1]
             dx_train_1 = dx[train_inds_1, :]
 
-            # preliminary propensity score for selection
-            ml_pi_prelim = clone(self._learner['ml_pi'])
-
-            # fit on first part of training set
-            ml_pi_prelim.fit(dx_train_1, s_train_1)
-            pi_hat_prelim['preds'] = _predict_zero_one_propensity(ml_pi_prelim, dx)
+            # fit propensity score for selection on first part of training set
+            fitted_models['ml_pi'][i_fold].fit(dx_train_1, s_train_1)
+            pi_hat_prelim['preds'] = _predict_zero_one_propensity(fitted_models['ml_pi'][i_fold], dx)
             pi_hat_prelim['targets'] = s
 
             # predictions for small pi in denominator
@@ -306,10 +321,9 @@ def _nuisance_est(self, smpls, n_jobs_cv, external_predictions, return_models=False):
             d_train_2 = d[train_inds_2]
             xpi_test = xpi[test_inds, :]
 
-            ml_m = clone(self._learner['ml_m'])
-            ml_m.fit(xpi_train_2, d_train_2)
+            fitted_models['ml_m'][i_fold].fit(xpi_train_2, d_train_2)
 
-            m_hat['preds'][test_inds] = _predict_zero_one_propensity(ml_m, xpi_test)
+            m_hat['preds'][test_inds] = _predict_zero_one_propensity(fitted_models['ml_m'][i_fold], xpi_test)
             m_hat['targets'][test_inds] = d[test_inds]
 
             # estimate conditional outcome g on second training sample - treatment
@@ -318,11 +332,10 @@ def _nuisance_est(self, smpls, n_jobs_cv, external_predictions, return_models=False):
             xpi_s1_d1_train_2 = xpi[s1_d1_train_2_indices, :]
             y_s1_d1_train_2 = y[s1_d1_train_2_indices]
 
-            ml_g_d1_prelim = clone(self._learner['ml_g'])
-            ml_g_d1_prelim.fit(xpi_s1_d1_train_2, y_s1_d1_train_2)
+            fitted_models['ml_g_d1'][i_fold].fit(xpi_s1_d1_train_2, y_s1_d1_train_2)
 
             # predict conditional outcome
-            g_hat_d1['preds'][test_inds] = ml_g_d1_prelim.predict(xpi_test)
+            g_hat_d1['preds'][test_inds] = fitted_models['ml_g_d1'][i_fold].predict(xpi_test)
             g_hat_d1['targets'][test_inds] = y[test_inds]
 
             # estimate conditional outcome on second training sample - control
@@ -331,13 +344,18 @@ def _nuisance_est(self, smpls, n_jobs_cv, external_predictions, return_models=False):
             xpi_s1_d0_train_2 = xpi[s1_d0_train_2_indices, :]
             y_s1_d0_train_2 = y[s1_d0_train_2_indices]
 
-            ml_g_d0_prelim = clone(self._learner['ml_g'])
-            ml_g_d0_prelim.fit(xpi_s1_d0_train_2, y_s1_d0_train_2)
+            fitted_models['ml_g_d0'][i_fold].fit(xpi_s1_d0_train_2, y_s1_d0_train_2)
 
             # predict conditional outcome
-            g_hat_d0['preds'][test_inds] = ml_g_d0_prelim.predict(xpi_test)
+            g_hat_d0['preds'][test_inds] = fitted_models['ml_g_d0'][i_fold].predict(xpi_test)
             g_hat_d0['targets'][test_inds] = y[test_inds]
 
+            if return_models:
+                g_hat_d1['models'] = fitted_models['ml_g_d1']
+                g_hat_d0['models'] = fitted_models['ml_g_d0']
+                pi_hat['models'] = fitted_models['ml_pi']
+                m_hat['models'] = fitted_models['ml_m']
+
         m_hat['preds'] = _trimm(m_hat['preds'], self._trimming_rule, self._trimming_threshold)
 
         # treatment indicator
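
And for the new `return_models` branch: the stored per-fold learners surface on the public object when fitting with `store_models=True`. A sketch under the same assumptions as above; the exact nesting of the `models` dict is inferred from other DoubleML classes:

```python
# Minimal sketch of retrieving the per-fold nuisance models that this
# commit now stores under the 'models' key of each nuisance dict.
import numpy as np
from sklearn.linear_model import LassoCV, LogisticRegressionCV
from doubleml import DoubleMLSSM
from doubleml.datasets import make_ssm_data  # helper assumed available in recent releases

np.random.seed(3141)
dml_data = make_ssm_data(n_obs=500, dim_x=20, mar=True)

dml_ssm = DoubleMLSSM(dml_data,
                      ml_g=LassoCV(),
                      ml_pi=LogisticRegressionCV(),
                      ml_m=LogisticRegressionCV())

# store_models=True routes return_models=True into _nuisance_est, so the
# fitted per-fold learners end up on the model object after fitting
dml_ssm.fit(store_models=True)

# nesting of .models (learner -> treatment variable -> repetition -> fold)
# is an assumption based on other DoubleML model classes
pi_fold_0 = dml_ssm.models['ml_pi']['d'][0][0]
print(type(pi_fold_0))
```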