Skip to content

Commit 5228d8b

Browse files
Carl Hvarfner authored and meta-codesync[bot] committed
Disallow noise_std as list (#4770)
Summary: Pull Request resolved: #4770. Per discussion in D90596997. Reviewed By: saitcakmak. Differential Revision: D90777058. fbshipit-source-id: d90616bbfe37f2ad59eff39a7ea91164cf06f1af
1 parent 7586b46 commit 5228d8b

File tree

6 files changed

+40
-41
lines changed

6 files changed

+40
-41
lines changed

ax/benchmark/benchmark_problem.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -45,9 +45,9 @@ class BenchmarkProblem(Base):
4545
data. This will be used by a `BenchmarkRunner`.
4646
noise_std: Describes how noise is added to the output of the
4747
`test_function`. If a float, IID random normal noise with that
48-
standard deviation is added. A list of floats, or a dict whose keys
49-
match `test_functions.outcome_names`, sets different noise
50-
standard deviations for the different outcomes produced by the
48+
standard deviation is added. A dict whose keys match
49+
`test_functions.outcome_names` sets different noise standard
50+
deviations for the different outcomes produced by the
5151
`test_function`. This will be used by a `BenchmarkRunner`.
5252
optimal_value: The best ground-truth objective value, used for scoring
5353
optimization results on a scale from 0 to 100, where achieving the
@@ -93,7 +93,7 @@ class BenchmarkProblem(Base):
9393
optimization_config: OptimizationConfig
9494
num_trials: int
9595
test_function: BenchmarkTestFunction
96-
noise_std: float | Sequence[float] | Mapping[str, float] = 0.0
96+
noise_std: float | Mapping[str, float] = 0.0
9797
optimal_value: float
9898
baseline_value: float
9999
worst_feasible_value: float | None = None

ax/benchmark/benchmark_runner.py

Lines changed: 7 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,6 @@
2424
from ax.runners.simulated_backend import SimulatedBackendRunner
2525
from ax.utils.common.serialization import TClassDecoderRegistry, TDecoderRegistry
2626
from ax.utils.testing.backend_simulator import BackendSimulator, BackendSimulatorOptions
27-
from pyre_extensions import assert_is_instance
2827

2928

3029
def _dict_of_arrays_to_df(
@@ -163,7 +162,7 @@ class BenchmarkRunner(Runner):
163162
test_function: A ``BenchmarkTestFunction`` from which to generate
164163
deterministic data before adding noise.
165164
noise_std: The standard deviation of the noise added to the data. Can be
166-
a list or dict to be per-metric.
165+
a dict to be per-metric.
167166
step_runtime_function: A function that takes in parameters
168167
(in ``TParameterization`` format) and returns the runtime of a step.
169168
max_concurrency: The maximum number of trials that can be running at a
@@ -176,7 +175,7 @@ class BenchmarkRunner(Runner):
176175
"""
177176

178177
test_function: BenchmarkTestFunction
179-
noise_std: float | Sequence[float] | Mapping[str, float] = 0.0
178+
noise_std: float | Mapping[str, float] = 0.0
180179
step_runtime_function: TBenchmarkStepRuntimeFunction | None = None
181180
max_concurrency: int = 1
182181
force_use_simulated_backend: bool = False
@@ -243,17 +242,11 @@ def get_noise_stds(self) -> dict[str, float]:
243242
noise_std = self.noise_std
244243
if isinstance(noise_std, float | int):
245244
return {name: float(noise_std) for name in self.outcome_names}
246-
elif isinstance(noise_std, dict):
247-
if not set(noise_std.keys()) == set(self.outcome_names):
248-
raise ValueError(
249-
"Noise std must have keys equal to outcome names if given as "
250-
"a dict."
251-
)
252-
return noise_std
253-
# list of floats
254-
return dict(
255-
zip(self.outcome_names, assert_is_instance(noise_std, list), strict=True)
256-
)
245+
if not set(noise_std.keys()) == set(self.outcome_names):
246+
raise ValueError(
247+
"Noise std must have keys equal to outcome names if given as a dict."
248+
)
249+
return dict(noise_std)
257250

258251
def run(self, trial: BaseTrial) -> dict[str, BenchmarkTrialMetadata]:
259252
"""Run the trial by evaluating its parameterization(s).

ax/benchmark/problems/synthetic/from_botorch.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ def create_problem_from_botorch(
9090
*,
9191
test_problem_class: type[BaseTestProblem] | str,
9292
test_problem_kwargs: dict[str, Any],
93-
noise_std: float | list[float] = 0.0,
93+
noise_std: float | dict[str, float] = 0.0,
9494
num_trials: int,
9595
baseline_value: float | None = None,
9696
name: str | None = None,

ax/benchmark/testing/benchmark_stubs.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ def get_single_objective_benchmark_problem(
6060
num_trials: int = 4,
6161
test_problem_kwargs: dict[str, Any] | None = None,
6262
report_inference_value_as_trace: bool = False,
63-
noise_std: float | list[float] = 0.0,
63+
noise_std: float | dict[str, float] = 0.0,
6464
status_quo_params: TParameterization | None = None,
6565
) -> BenchmarkProblem:
6666
return create_problem_from_botorch(

ax/benchmark/tests/problems/synthetic/test_from_botorch.py

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -130,7 +130,7 @@ def test_single_objective_from_botorch(self) -> None:
130130
def _test_constrained_from_botorch(
131131
self,
132132
observe_noise_sd: bool,
133-
noise_std: float | list[float],
133+
noise_std: float | dict[str, float],
134134
test_problem_class: type[ConstrainedBaseTestProblem],
135135
) -> None:
136136
ax_problem = create_problem_from_botorch(
@@ -174,7 +174,15 @@ def _test_constrained_from_botorch(
174174
def test_constrained_soo_from_botorch(self) -> None:
175175
for observe_noise_sd, noise_std in product(
176176
[False, True],
177-
[0.0, 0.1, [0.1, 0.3, 0.4]],
177+
[
178+
0.0,
179+
0.1,
180+
{
181+
"ConstrainedGramacy": 0.1,
182+
"constraint_slack_0": 0.3,
183+
"constraint_slack_1": 0.4,
184+
},
185+
],
178186
):
179187
with self.subTest(observe_noise_sd=observe_noise_sd, noise_std=noise_std):
180188
self._test_constrained_from_botorch(

ax/benchmark/tests/test_benchmark_runner.py

Lines changed: 17 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ def test_runner(self) -> None:
9898
)
9999
for modified_bounds, noise_std in product(
100100
(None, [(0.0, 2.0)] * 6),
101-
(0.0, [0.1] * num_outcomes),
101+
(0.0, {name: 0.1 for name in outcome_names}),
102102
)
103103
]
104104
param_based_cases = [
@@ -108,11 +108,14 @@ def test_runner(self) -> None:
108108
num_outcomes,
109109
)
110110
for num_outcomes in (1, 2)
111-
for noise_std in (0.0, [float(i) for i in range(num_outcomes)])
111+
for noise_std in (
112+
0.0,
113+
{f"objective_{i}": float(i) for i in range(num_outcomes)},
114+
)
112115
]
113116
surrogate_cases = [
114117
(get_soo_surrogate_test_function(lazy=False), noise_std, 1)
115-
for noise_std in (0.0, 1.0, [0.0], [1.0])
118+
for noise_std in (0.0, 1.0, {"branin": 0.0}, {"branin": 1.0})
116119
]
117120
for test_function, noise_std, num_outcomes in (
118121
botorch_cases + param_based_cases + surrogate_cases
@@ -137,11 +140,8 @@ def test_runner(self) -> None:
137140
):
138141
self.assertIs(runner.test_function, test_function)
139142
self.assertEqual(runner.outcome_names, outcome_names)
140-
if isinstance(noise_std, list):
141-
self.assertEqual(
142-
runner.get_noise_stds(),
143-
dict(zip(runner.outcome_names, noise_std)),
144-
)
143+
if isinstance(noise_std, dict):
144+
self.assertEqual(runner.get_noise_stds(), noise_std)
145145
else: # float
146146
self.assertEqual(
147147
runner.get_noise_stds(),
@@ -244,15 +244,17 @@ def test_runner(self) -> None:
244244
set(runner.test_function.outcome_names), set(res.keys())
245245
)
246246

247-
for i, df in enumerate(res.values()):
248-
if isinstance(noise_std, list):
249-
self.assertEqual(df["sem"].item(), noise_std[i])
250-
if all(n == 0 for n in noise_std):
251-
self.assertTrue(np.array_equal(df["mean"], Y[i, :]))
247+
for outcome_name, df in res.items():
248+
if isinstance(noise_std, dict):
249+
self.assertEqual(df["sem"].item(), noise_std[outcome_name])
250+
if all(n == 0 for n in noise_std.values()):
251+
Y_idx = runner.outcome_names.index(outcome_name)
252+
self.assertTrue(np.array_equal(df["mean"], Y[Y_idx, :]))
252253
else: # float
253254
self.assertEqual(df["sem"].item(), noise_std)
254255
if noise_std == 0:
255-
self.assertTrue(np.array_equal(df["mean"], Y[i, :]))
256+
Y_idx = runner.outcome_names.index(outcome_name)
257+
self.assertTrue(np.array_equal(df["mean"], Y[Y_idx, :]))
256258

257259
with self.subTest(f"test `poll_trial_status()`, {test_description}"):
258260
self.assertEqual(
@@ -321,14 +323,10 @@ def test_get_noise_stds(self) -> None:
321323
)
322324
self.assertDictEqual(runner.get_noise_stds(), expected_noise_sd_dict)
323325

324-
with self.subTest("list noise_std"):
325-
runner = BenchmarkRunner(test_function=test_function, noise_std=[1.0])
326-
self.assertDictEqual(runner.get_noise_stds(), expected_noise_sd_dict)
327-
328326
def test_heterogeneous_noise(self) -> None:
329327
outcome_names = ["objective_0", "constraint"]
330328
noise_dict = {"objective_0": 0.1, "constraint": 0.05}
331-
for noise_std in [[0.1, 0.05], noise_dict]:
329+
for noise_std in [noise_dict]:
332330
runner = BenchmarkRunner(
333331
test_function=BoTorchTestFunction(
334332
botorch_problem=ConstrainedHartmann(dim=6),

0 commit comments

Comments
 (0)