Skip to content

Commit 406de15

Browse files
ItsMrLin authored and facebook-github-bot committed
add input constructor for qEUBO (#2335)
Summary: Pull Request resolved: #2335 As titled Reviewed By: esantorella Differential Revision: D57130906 fbshipit-source-id: 459e8c1fcef9afe1a9587e812e940e270a786cb2
1 parent 0ff2b3e commit 406de15

File tree

2 files changed

+107
-26
lines changed

2 files changed

+107
-26
lines changed

botorch/acquisition/input_constructors.py

Lines changed: 67 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,10 @@
8383
MCAcquisitionObjective,
8484
PosteriorTransform,
8585
)
86-
from botorch.acquisition.preference import AnalyticExpectedUtilityOfBestOption
86+
from botorch.acquisition.preference import (
87+
AnalyticExpectedUtilityOfBestOption,
88+
qExpectedUtilityOfBestOption,
89+
)
8790
from botorch.acquisition.risk_measures import RiskMeasureMCObjective
8891
from botorch.acquisition.utils import (
8992
compute_best_feasible_objective,
@@ -94,7 +97,7 @@
9497
)
9598
from botorch.exceptions.errors import UnsupportedError
9699
from botorch.models.cost import AffineFidelityCostModel
97-
from botorch.models.deterministic import FixedSingleSampleModel
100+
from botorch.models.deterministic import DeterministicModel, FixedSingleSampleModel
98101
from botorch.models.gpytorch import GPyTorchModel
99102
from botorch.models.model import Model
100103
from botorch.optim.optimize import optimize_acqf
@@ -1286,7 +1289,7 @@ def construct_inputs_analytic_eubo(
12861289
r"""Construct kwargs for the `AnalyticExpectedUtilityOfBestOption` constructor.
12871290
12881291
`model` is the primary model defined over the parameter space. It can be the
1289-
outcomde model in BOPE or the preference model in PBO. `pref_model` is the model
1292+
outcome model in BOPE or the preference model in PBO. `pref_model` is the model
12901293
defined over the outcome/metric space, which is typically the preference model
12911294
in BOPE.
12921295
@@ -1328,6 +1331,67 @@ def construct_inputs_analytic_eubo(
13281331
}
13291332

13301333

1334+
@acqf_input_constructor(qExpectedUtilityOfBestOption)
def construct_inputs_qeubo(
    model: Model,
    pref_model: Optional[Model] = None,
    outcome_model: Optional[DeterministicModel] = None,
    sample_multiplier: Optional[float] = 1.0,
    sampler: Optional[MCSampler] = None,
    objective: Optional[MCAcquisitionObjective] = None,
    posterior_transform: Optional[PosteriorTransform] = None,
    X_pending: Optional[Tensor] = None,
) -> Dict[str, Any]:
    r"""Construct kwargs for the `qExpectedUtilityOfBestOption` (qEUBO) constructor.

    `model` is the primary model defined over the parameter space. It can be the
    outcome model in BOPE or the preference model in PBO. `pref_model` is the model
    defined over the outcome/metric space, which is typically the preference model
    in BOPE.

    If both `model` and `pref_model` exist, we are performing Bayesian Optimization
    with Preference Exploration (BOPE). When `pref_model` is None, we are performing
    preferential BO (PBO) and `model` is the preference model.

    Args:
        model: The outcome model to be used in the acquisition function in BOPE
            when pref_model exists; otherwise, model is the preference model and
            we are doing Preferential BO.
        pref_model: The preference model to be used in preference exploration as in
            BOPE; if None, we are doing PBO and model is the preference model.
        outcome_model: Optional deterministic outcome model. Not referenced in this
            constructor's body; in BOPE a fixed single-sample model is built below.
        sample_multiplier: The scale factor for the single-sample model. Treated
            as 1.0 when None is passed explicitly.
        sampler: The sampler used to draw base samples for MC evaluation.
        objective: The MCAcquisitionObjective under which samples are evaluated.
        posterior_transform: The posterior transform to be applied to the posterior.
        X_pending: A tensor of points submitted for evaluation but not yet observed.

    Returns:
        A dict mapping kwarg names of the constructor to values.
    """
    # The annotation permits None; normalize so the scaling below cannot fail
    # with a TypeError on `None * tensor`.
    if sample_multiplier is None:
        sample_multiplier = 1.0

    if pref_model is None:
        # PBO: `model` already maps the parameter space to utilities, so no
        # outcome model is needed.
        return {
            "pref_model": model,
            "outcome_model": None,
            "sampler": sampler,
            "objective": objective,
            "posterior_transform": posterior_transform,
            "X_pending": X_pending,
        }
    else:
        # BOPE: construct a deterministic fixed single-sample model from `model`,
        # i.e., performing EUBO-zeta by default as described in
        # https://arxiv.org/abs/2203.11382.
        # Using pref_model.dim instead of model.num_outputs here as MTGP's
        # num_outputs could be tied to the number of tasks.
        w = torch.randn(pref_model.dim) * sample_multiplier
        one_sample_outcome_model = FixedSingleSampleModel(model=model, w=w)

        return {
            "pref_model": pref_model,
            "outcome_model": one_sample_outcome_model,
            "sampler": sampler,
            "objective": objective,
            "posterior_transform": posterior_transform,
            "X_pending": X_pending,
        }
13311395
def get_best_f_analytic(
13321396
training_data: MaybeDict[SupervisedDataset],
13331397
posterior_transform: Optional[PosteriorTransform] = None,

test/acquisition/test_input_constructors.py

Lines changed: 40 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,10 @@
7878
LinearMCObjective,
7979
ScalarizedPosteriorTransform,
8080
)
81-
from botorch.acquisition.preference import AnalyticExpectedUtilityOfBestOption
81+
from botorch.acquisition.preference import (
82+
AnalyticExpectedUtilityOfBestOption,
83+
qExpectedUtilityOfBestOption,
84+
)
8285
from botorch.acquisition.utils import (
8386
expand_trace_observations,
8487
project_to_target_fidelity,
@@ -393,7 +396,10 @@ def test_construct_inputs_noisy_ei(self) -> None:
393396
with self.assertRaisesRegex(ValueError, "Field `X` must be shared"):
394397
c(model=mock_model, training_data=self.multiX_multiY)
395398

396-
def test_construct_inputs_constrained_analytic_eubo(self) -> None:
399+
def test_construct_inputs_eubo(self) -> None:
400+
"""test input constructor for analytical EUBO and MC qEUBO"""
401+
402+
# Set up
397403
# create dummy modellist gp
398404
n = 10
399405
X = torch.linspace(0, 0.95, n).unsqueeze(dim=-1)
@@ -409,21 +415,44 @@ def test_construct_inputs_constrained_analytic_eubo(self) -> None:
409415
)
410416
self.assertEqual(model.num_outputs, 6)
411417

412-
c = get_acqf_input_constructor(AnalyticExpectedUtilityOfBestOption)
413418
mock_pref_model = self.mock_model
414419
# assume we only have a preference model with 2 outcomes
415420
mock_pref_model.dim = 2
416421
mock_pref_model.datapoints = torch.tensor([])
417422

418-
# test basic construction
419-
kwargs = c(model=model, pref_model=mock_pref_model)
420-
self.assertIsInstance(kwargs["outcome_model"], FixedSingleSampleModel)
421-
self.assertIs(kwargs["pref_model"], mock_pref_model)
422-
self.assertIsNone(kwargs["previous_winner"])
423-
# test instantiation
424-
AnalyticExpectedUtilityOfBestOption(**kwargs)
423+
for eubo_acqf in (
424+
AnalyticExpectedUtilityOfBestOption,
425+
qExpectedUtilityOfBestOption,
426+
):
427+
c = get_acqf_input_constructor(eubo_acqf)
428+
429+
# test model only (i.e., PBO) construction
430+
kwargs = c(model=mock_pref_model)
431+
self.assertIsNone(kwargs["outcome_model"])
432+
433+
# test basic construction
434+
kwargs = c(model=model, pref_model=mock_pref_model)
435+
self.assertIsInstance(kwargs["outcome_model"], FixedSingleSampleModel)
436+
self.assertIs(kwargs["pref_model"], mock_pref_model)
437+
if eubo_acqf is AnalyticExpectedUtilityOfBestOption:
438+
self.assertIsNone(kwargs["previous_winner"])
439+
# test instantiation
440+
eubo_acqf(**kwargs)
441+
442+
# test sample_multiplier
443+
torch.manual_seed(123)
444+
kwargs = c(
445+
model=model,
446+
pref_model=mock_pref_model,
447+
sample_multiplier=1e6,
448+
)
449+
# w by default is drawn from std normal and very unlikely to be > 10.0
450+
self.assertTrue((kwargs["outcome_model"].w.abs() > 10.0).all())
451+
# Check w has the right dimension that agrees with the preference model
452+
self.assertEqual(kwargs["outcome_model"].w.shape[-1], mock_pref_model.dim)
425453

426454
# test previous_winner
455+
c = get_acqf_input_constructor(AnalyticExpectedUtilityOfBestOption)
427456
previous_winner = torch.randn(mock_pref_model.dim)
428457
kwargs = c(
429458
model=model,
@@ -434,18 +463,6 @@ def test_construct_inputs_constrained_analytic_eubo(self) -> None:
434463
# test instantiation
435464
AnalyticExpectedUtilityOfBestOption(**kwargs)
436465

437-
# test sample_multiplier
438-
torch.manual_seed(123)
439-
kwargs = c(
440-
model=model,
441-
pref_model=mock_pref_model,
442-
sample_multiplier=1e6,
443-
)
444-
# w by default is drawn from std normal and very unlikely to be > 10.0
445-
self.assertTrue((kwargs["outcome_model"].w.abs() > 10.0).all())
446-
# Check w has the right dimension that agrees with the preference model
447-
self.assertEqual(kwargs["outcome_model"].w.shape[-1], mock_pref_model.dim)
448-
449466

450467
class TestMCAcquisitionFunctionInputConstructors(InputConstructorBaseTestCase):
451468
def test_construct_inputs_mc_base(self) -> None:
@@ -1419,7 +1436,7 @@ def test_eubo(self) -> None:
14191436
pref_model.dim = 2
14201437
pref_model.datapoints = torch.tensor([])
14211438

1422-
classes = [AnalyticExpectedUtilityOfBestOption]
1439+
classes = [AnalyticExpectedUtilityOfBestOption, qExpectedUtilityOfBestOption]
14231440
self._test_constructor_base(
14241441
classes=classes,
14251442
model=model,

0 commit comments

Comments
 (0)