@@ -1072,7 +1072,7 @@ def ml_l_params(trial):
 >>> print(tune_res[0]['ml_l'].best_params)
 {'learning_rate': 0.03907122389107094}
 >>> # Fit and get results
->>> dml_plr.fit().summary
+>>> dml_plr.fit().summary # doctest: +SKIP
        coef   std err          t         P>|t|     2.5 %    97.5 %
 d   0.57436  0.045206  12.705519  5.510257e-37  0.485759  0.662961
 >>> # Example with scoring methods and directions
@@ -1089,7 +1089,7 @@ def ml_l_params(trial):
 ...     optuna_settings=optuna_settings, return_tune_res=True)
 >>> print(tune_res[0]['ml_l'].best_params)
 {'learning_rate': 0.04300012336462904}
->>> dml_plr.fit().summary
+>>> dml_plr.fit().summary # doctest: +SKIP
        coef   std err          t         P>|t|     2.5 %    97.5 %
 d  0.574796  0.045062  12.755721  2.896820e-37  0.486476  0.663115
 """
@@ -1580,7 +1580,7 @@ def evaluate_learners(self, learners=None, metric=_rmse):
 >>> def mae(y_true, y_pred):
 ...     subset = np.logical_not(np.isnan(y_true))
 ...     return mean_absolute_error(y_true[subset], y_pred[subset])
->>> dml_irm_obj.evaluate_learners(metric=mae)
+>>> dml_irm_obj.evaluate_learners(metric=mae) # doctest: +SKIP
 {'ml_g0': array([[0.88086873]]), 'ml_g1': array([[0.8452644]]), 'ml_m': array([[0.35789438]])}
 """
 # if no learners are provided try to evaluate all learners
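For background: "# doctest: +SKIP" is a standard directive of Python's built-in doctest module. A marked example is neither executed nor compared against its expected output, which is evidently why it is added above to examples whose printed estimates (fitted coefficients, per-learner losses) can vary across environments, while the deterministic best_params checks stay active. A minimal, self-contained sketch of the mechanism; the name noisy_estimate is hypothetical:

    import doctest

    def demo():
        """
        >>> 1 + 1                # executed and checked as usual
        2
        >>> noisy_estimate()     # doctest: +SKIP
        0.5741
        """

    # noisy_estimate() is never called: the +SKIP directive tells doctest to
    # drop that example entirely, so run-dependent output cannot fail the suite.
    doctest.testmod()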