|
21 | 21 | from baybe.utils.basic import get_subclasses
|
22 | 22 | from baybe.utils.dataframe import (
|
23 | 23 | add_parameter_noise,
|
24 |
| - create_fake_input, |
25 | 24 | )
|
26 | 25 | from baybe.utils.random import temporary_seed
|
27 | 26 |
|
|
115 | 114 | ],
|
116 | 115 | )
|
117 | 116 | @pytest.mark.parametrize("n_grid_points", [8], ids=["grid8"])
|
118 |
| -def test_pending_points(campaign, batch_size): |
| 117 | +def test_pending_points(campaign, batch_size, fake_measurements): |
119 | 118 | """Test there is no recommendation overlap if pending experiments are specified."""
|
120 | 119 | warnings.filterwarnings("ignore", category=UnusedObjectWarning)
|
121 | 120 |
|
122 | 121 | # Add some initial measurements
|
123 |
| - rec = create_fake_input(campaign.parameters, campaign.targets, batch_size) |
124 |
| - campaign.add_measurements(rec) |
| 122 | + campaign.add_measurements(fake_measurements) |
125 | 123 |
|
126 | 124 | # Get recommendations and set them as pending experiments while getting another set
|
127 | 125 | # Fix the random seed for each recommend call to limit influence of randomness in
|
@@ -156,24 +154,23 @@ def test_pending_points(campaign, batch_size):
|
156 | 154 | )
|
157 | 155 | @pytest.mark.parametrize("n_grid_points", [5], ids=["g5"])
|
158 | 156 | @pytest.mark.parametrize("batch_size", [3], ids=["b3"])
|
159 |
def test_invalid_acqf(searchspace, objective, batch_size, acqf, fake_measurements):
    """Verify that acqfs without pending-experiment support raise an error."""
    # Wrap the acqf under test in the standard two-phase meta recommender.
    meta_recommender = TwoPhaseMetaRecommender(
        recommender=BotorchRecommender(acquisition_function=acqf)
    )

    # Pending experiments are derived from the fake measurements by adding
    # parameter noise, so they stay inside the search space but differ in value.
    fake_pending_experiments = fake_measurements.copy()
    add_parameter_noise(fake_pending_experiments, searchspace.parameters)

    # Recommending with pending experiments must fail for an incompatible acqf.
    with pytest.raises(IncompatibleAcquisitionFunctionError):
        meta_recommender.recommend(
            batch_size,
            searchspace,
            objective,
            measurements=fake_measurements,
            pending_experiments=fake_pending_experiments,
        )
|
178 | 175 |
|
179 | 176 |
|
@@ -215,18 +212,18 @@ def test_invalid_input(
|
215 | 212 | batch_size,
|
216 | 213 | invalid_pending_value,
|
217 | 214 | parameter_names,
|
| 215 | + fake_measurements, |
218 | 216 | ):
|
219 | 217 | """Test exception raised for invalid pending experiments input."""
|
220 | 218 | # Create fake measurements and pending experiments
|
221 |
| - rec1 = create_fake_input(searchspace.parameters, objective.targets, batch_size) |
222 |
| - rec2 = rec1.copy() |
223 |
| - rec2[parameter_names[0]] = invalid_pending_value |
| 219 | + fake_pending_experiments = fake_measurements.copy() |
| 220 | + fake_pending_experiments[parameter_names[0]] = invalid_pending_value |
224 | 221 |
|
225 | 222 | with pytest.raises((ValueError, TypeError), match="parameter"):
|
226 | 223 | recommender.recommend(
|
227 | 224 | batch_size,
|
228 | 225 | searchspace,
|
229 | 226 | objective,
|
230 |
| - measurements=rec1, |
231 |
| - pending_experiments=rec2, |
| 227 | + measurements=fake_measurements, |
| 228 | + pending_experiments=fake_pending_experiments, |
232 | 229 | )
|
0 commit comments