@@ -12,7 +12,7 @@


 def _basic_optuna_settings(additional=None):
-    base_settings = {"n_trials": 20, "sampler": optuna.samplers.RandomSampler(seed=3141)}
+    base_settings = {"n_trials": 10, "sampler": optuna.samplers.RandomSampler(seed=3141)}
     if additional is not None:
         base_settings.update(additional)
     return base_settings
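For context, a self-contained sketch of how this helper is consumed: per-test overrides are merged on top of the shared defaults. The `{"n_trials": 5}` override is a hypothetical example; the defaults mirror the hunk above.

```python
import optuna

def _basic_optuna_settings(additional=None):
    # Shared defaults after this change: 10 trials, seeded random sampler.
    base_settings = {"n_trials": 10, "sampler": optuna.samplers.RandomSampler(seed=3141)}
    if additional is not None:
        base_settings.update(additional)
    return base_settings

# Hypothetical per-test override, merged on top of the defaults.
settings = _basic_optuna_settings({"n_trials": 5})
assert settings["n_trials"] == 5
assert isinstance(settings["sampler"], optuna.samplers.RandomSampler)
```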
@@ -72,8 +72,8 @@ def test_resolve_optuna_scoring_classifier_default():
 def test_resolve_optuna_scoring_with_criterion_keeps_default():
     learner = DecisionTreeRegressor(random_state=0)
     scoring, message = _resolve_optuna_scoring(None, learner, "ml_l")
-    assert scoring is None
-    assert "criterion" in message
+    assert scoring == "neg_root_mean_squared_error"
+    assert "neg_root_mean_squared_error" in message


 def test_resolve_optuna_scoring_lightgbm_regressor_default():
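A minimal sketch of the resolver behavior these updated assertions pin down, assuming `_resolve_optuna_scoring` follows the `(scoring, learner, learner_name) -> (scoring, message)` shape seen in the test. This is not the package's actual implementation, and the classifier fallback shown is an assumption:

```python
from sklearn.base import is_regressor
from sklearn.tree import DecisionTreeRegressor

def _resolve_optuna_scoring_sketch(scoring, learner, learner_name):
    # User-supplied scoring passes through unchanged.
    if scoring is not None:
        return scoring, f"Using provided scoring for {learner_name}."
    # Regressors now fall back to RMSE-based scoring instead of None,
    # which is what the updated assertions check.
    if is_regressor(learner):
        default = "neg_root_mean_squared_error"
    else:
        default = "accuracy"  # assumption; the classifier-default test is not shown in full
    return default, f"No scoring supplied for {learner_name}; defaulting to {default}."

scoring, message = _resolve_optuna_scoring_sketch(
    None, DecisionTreeRegressor(random_state=0), "ml_l"
)
assert scoring == "neg_root_mean_squared_error"
assert "neg_root_mean_squared_error" in message
```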
@@ -153,8 +153,8 @@ def test_doubleml_optuna_partial_tuning_single_learner():
     assert isinstance(tune_res[0], dict)
     assert set(tune_res[0].keys()) == {"ml_l"}
     l_tune = tune_res[0]["ml_l"]
-    assert hasattr(l_tune, "tuned_")
-    assert l_tune.tuned_ is True
+    assert hasattr(l_tune, "tuned")
+    assert l_tune.tuned is True
     assert "ml_m" not in tune_res[0]
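To make the renamed attribute concrete, here is a hypothetical stand-in for the per-learner tuning result (`tuned_` becomes `tuned`); the real object presumably carries more tuning metadata than this sketch:

```python
from dataclasses import dataclass, field

@dataclass
class TuneResultSketch:
    # Renamed in this change: `tuned_` -> `tuned`. The field set here is
    # illustrative only; the library's result object likely has more fields.
    tuned: bool = True
    best_params: dict = field(default_factory=dict)

# Shape the partial-tuning assertions above expect: only "ml_l" was tuned.
tune_res = [{"ml_l": TuneResultSketch(best_params={"max_depth": 3})}]
assert set(tune_res[0].keys()) == {"ml_l"}
assert tune_res[0]["ml_l"].tuned is True
assert "ml_m" not in tune_res[0]
```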