@@ -57,13 +57,17 @@ def select_best_hypers(
 
     rng = np.random.default_rng(0)
     out = bootstrap_hyper_selection(rng, score_per_seed, statistic.value, prefer.value, threshold)
+    config = {
+        col: df[col][out.best_idx] for col in cols
+    }
 
     return HyperSelectionResult(
-        best_configuration=df.row(out.best_idx),
+        best_configuration=config,
         best_score=out.best_score,
 
         uncertainty_set_configurations=[
-            df.row(idx) for idx in out.uncertainty_set_idxs
+            {col: df[col][int(idx)] for col in cols}
+            for idx in out.uncertainty_set_idxs
         ],
         uncertainty_set_probs=out.uncertainty_set_probs,
         sample_stat=out.sample_stat,
@@ -72,10 +76,10 @@ def select_best_hypers(
 )
 
 class HyperSelectionResult(NamedTuple):
-    best_configuration: tuple[Any, ...]
+    best_configuration: dict[str, Any]
     best_score: float
 
-    uncertainty_set_configurations: list[tuple[Any, ...]]
+    uncertainty_set_configurations: list[dict[str, Any]]
     uncertainty_set_probs: np.ndarray
    sample_stat: float
    ci: tuple[float, float]
0 commit comments