@@ -1,5 +1,6 @@
 """Tests for multi-target training."""
 
+import json
 from typing import Dict, Optional, Tuple
 
 import numpy as np
@@ -248,3 +249,29 @@ def run_with_iter(device: Device) -> None: # pylint: disable=too-many-locals |
         evals_result_0["Train"]["rmse"], evals_result_2["Train"]["rmse"]
     )
     assert_allclose(device, booster_0.inplace_predict(X), booster_2.inplace_predict(X))
+
+
+def run_eta(device: Device) -> None:
+    from sklearn.datasets import make_regression
+
+    X, y = make_regression(512, 16, random_state=2025, n_targets=3)
+    params = {
+        "device": device,
+        "multi_strategy": "multi_output_tree",
+        "learning_rate": 1.0,
+        "debug_synchronize": True,
+        "base_score": 0.0,
+    }
+    Xy = QuantileDMatrix(X, y)
+    booster_0 = train(params, Xy, num_boost_round=1)
+    params["learning_rate"] = 0.1
+    booster_1 = train(params, Xy, num_boost_round=1)
+    params["learning_rate"] = 2.0
+    booster_2 = train(params, Xy, num_boost_round=1)
+
+    predt_0 = booster_0.predict(Xy)
+    predt_1 = booster_1.predict(Xy)
+    predt_2 = booster_2.predict(Xy)
+
+    np.testing.assert_allclose(predt_0, predt_1 * 10, rtol=1e-6)
+    np.testing.assert_allclose(predt_0 * 2, predt_2, rtol=1e-6)
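
The two assertions encode a simple invariant: with base_score fixed at 0.0 and a single boosting round, the gradients are evaluated at the same base prediction regardless of learning_rate, so every leaf weight (and hence every prediction) scales linearly with eta. Below is a minimal standalone sketch of that check using the public xgboost API directly; the predict_with_eta helper and the omission of the device / debug_synchronize parameters are simplifications for illustration, not part of the patch.

```python
# Standalone sketch (not part of the diff): with base_score pinned to 0 and a
# single boosting round, first-round gradients do not depend on eta, so the
# predictions scale linearly with the learning rate.
import numpy as np
import xgboost as xgb
from sklearn.datasets import make_regression


def predict_with_eta(eta: float, Xy: xgb.QuantileDMatrix) -> np.ndarray:
    params = {
        "multi_strategy": "multi_output_tree",
        "learning_rate": eta,
        "base_score": 0.0,
    }
    booster = xgb.train(params, Xy, num_boost_round=1)
    return booster.predict(Xy)


X, y = make_regression(512, 16, random_state=2025, n_targets=3)
Xy = xgb.QuantileDMatrix(X, y)

# Scaling eta by a factor scales every predicted value by the same factor.
np.testing.assert_allclose(
    predict_with_eta(1.0, Xy), 10 * predict_with_eta(0.1, Xy), rtol=1e-6
)
np.testing.assert_allclose(
    2 * predict_with_eta(1.0, Xy), predict_with_eta(2.0, Xy), rtol=1e-6
)
```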