diff --git a/ax/adapter/tests/test_adapter_utils.py b/ax/adapter/tests/test_adapter_utils.py index 641c97cc27d..acf25527962 100644 --- a/ax/adapter/tests/test_adapter_utils.py +++ b/ax/adapter/tests/test_adapter_utils.py @@ -56,9 +56,9 @@ def test_feasible_hypervolume(self) -> None: mb = Metric(name="b", lower_is_better=True) mc = Metric(name="c", lower_is_better=False) optimization_config = MultiObjectiveOptimizationConfig( - objective=MultiObjective( - objectives=[Objective(metric=ma), Objective(metric=mb)] - ), + objectives=[ + MultiObjective(objectives=[Objective(metric=ma), Objective(metric=mb)]) + ], outcome_constraints=[ OutcomeConstraint( metric=mc, diff --git a/ax/adapter/tests/test_base_adapter.py b/ax/adapter/tests/test_base_adapter.py index bf4a72949e8..76293aac680 100644 --- a/ax/adapter/tests/test_base_adapter.py +++ b/ax/adapter/tests/test_base_adapter.py @@ -214,7 +214,7 @@ def test_fit_tracking_metrics(self) -> None: fit_tracking_metrics=False, ) new_oc = OptimizationConfig( - objective=Objective(metric=Metric(name="test_metric2"), minimize=False), + objectives=[Objective(metric=Metric(name="test_metric2"), minimize=False)], ) with self.assertRaisesRegex(UnsupportedError, "fit_tracking_metrics"): adapter.gen(n=1, optimization_config=new_oc) @@ -301,7 +301,7 @@ def test_gen_base(self, mock_fit: Mock, mock_gen_arms: Mock) -> None: # Gen with a different optimization config. oc2 = OptimizationConfig( - objective=Objective(metric=Metric(name="branin"), minimize=True) + objectives=[Objective(metric=Metric(name="branin"), minimize=True)] ) with mock.patch(ADAPTER__GEN_PATH, return_value=mock_return_value) as mock_gen: adapter.gen(n=1, search_space=search_space, optimization_config=oc2) @@ -1369,10 +1369,12 @@ def test_untransform_observation_features_derived_parameter_with_digits( is_test=True, tracking_metrics=[metric], optimization_config=OptimizationConfig( - objective=Objective( - metric=metric, - minimize=True, - ) + objectives=[ + Objective( + metric=metric, + minimize=True, + ) + ] ), runner=SyntheticRunner(), ) diff --git a/ax/adapter/tests/test_cross_validation.py b/ax/adapter/tests/test_cross_validation.py index d277c4bdd22..21278a8c367 100644 --- a/ax/adapter/tests/test_cross_validation.py +++ b/ax/adapter/tests/test_cross_validation.py @@ -475,7 +475,7 @@ def test_has_good_opt_config_model_fit(self) -> None: # Test single objective optimization_config = OptimizationConfig( - objective=Objective(metric=Metric("m1"), minimize=True) + objectives=[Objective(metric=Metric("m1"), minimize=True)] ) has_good_fit = has_good_opt_config_model_fit( optimization_config=optimization_config, @@ -485,12 +485,14 @@ def test_has_good_opt_config_model_fit(self) -> None: # Test multi objective optimization_config = MultiObjectiveOptimizationConfig( - objective=MultiObjective( - objectives=[ - Objective(metric=Metric("m1"), minimize=False), - Objective(metric=Metric("m2"), minimize=False), - ] - ) + objectives=[ + MultiObjective( + objectives=[ + Objective(metric=Metric("m1"), minimize=False), + Objective(metric=Metric("m2"), minimize=False), + ] + ) + ] ) has_good_fit = has_good_opt_config_model_fit( optimization_config=optimization_config, @@ -500,7 +502,7 @@ def test_has_good_opt_config_model_fit(self) -> None: # Test constraints optimization_config = OptimizationConfig( - objective=Objective(metric=Metric("m1"), minimize=False), + objectives=[Objective(metric=Metric("m1"), minimize=False)], outcome_constraints=[ OutcomeConstraint(metric=Metric("m2"), op=ComparisonOp.GEQ, bound=0.1) 
], diff --git a/ax/adapter/tests/test_discrete_adapter.py b/ax/adapter/tests/test_discrete_adapter.py index 523cb8af0c9..3b574104a7a 100644 --- a/ax/adapter/tests/test_discrete_adapter.py +++ b/ax/adapter/tests/test_discrete_adapter.py @@ -143,7 +143,7 @@ def test_predict(self) -> None: def test_gen(self) -> None: # Test with constraints optimization_config = OptimizationConfig( - objective=Objective(metric=Metric("m1"), minimize=True), + objectives=[Objective(metric=Metric("m1"), minimize=True)], outcome_constraints=[ OutcomeConstraint( metric=Metric("m2"), op=ComparisonOp.GEQ, bound=2, relative=False @@ -231,7 +231,7 @@ def test_gen(self) -> None: # Test validation optimization_config = OptimizationConfig( - objective=Objective(metric=Metric("m1"), minimize=False), + objectives=[Objective(metric=Metric("m1"), minimize=False)], outcome_constraints=[ OutcomeConstraint( metric=Metric("m2"), op=ComparisonOp.GEQ, bound=2, relative=True diff --git a/ax/adapter/tests/test_hierarchical_search_space.py b/ax/adapter/tests/test_hierarchical_search_space.py index 0fd03518a3d..1e128bc46ca 100644 --- a/ax/adapter/tests/test_hierarchical_search_space.py +++ b/ax/adapter/tests/test_hierarchical_search_space.py @@ -142,10 +142,12 @@ def _test_gen_base( search_space=hss, tracking_metrics=[metric], optimization_config=OptimizationConfig( - objective=Objective( - metric=metric, - minimize=True, - ) + objectives=[ + Objective( + metric=metric, + minimize=True, + ) + ] ), runner=SyntheticRunner(), ) diff --git a/ax/adapter/tests/test_torch_adapter.py b/ax/adapter/tests/test_torch_adapter.py index 63b5e55195d..6a4e6c6e00b 100644 --- a/ax/adapter/tests/test_torch_adapter.py +++ b/ax/adapter/tests/test_torch_adapter.py @@ -95,7 +95,7 @@ def test_TorchAdapter(self, device: torch.device | None = None) -> None: min=0.0, max=5.0, parameter_names=feature_names ) opt_config = OptimizationConfig( - objective=Objective(metric=Metric("y1"), minimize=True), + objectives=[Objective(metric=Metric("y1"), minimize=True)], outcome_constraints=[ OutcomeConstraint( metric=Metric("y2"), op=ComparisonOp.GEQ, bound=0.0, relative=False @@ -212,7 +212,7 @@ def test_TorchAdapter(self, device: torch.device | None = None) -> None: ) best_point_return_value = torch.tensor([1.0, 2.0, 3.0], **tkwargs) opt_config = OptimizationConfig( - objective=Objective(metric=Metric("y1"), minimize=False), + objectives=[Objective(metric=Metric("y1"), minimize=False)], ) pending_observations = { "y2": [ObservationFeatures(parameters={"x1": 1.0, "x2": 2.0, "x3": 3.0})] @@ -406,7 +406,7 @@ def test_evaluate_acquisition_function(self) -> None: def test_best_point(self) -> None: search_space = get_search_space_for_range_value() oc = OptimizationConfig( - objective=Objective(metric=Metric("a"), minimize=False), + objectives=[Objective(metric=Metric("a"), minimize=False)], outcome_constraints=[], ) exp = Experiment(search_space=search_space, optimization_config=oc, name="test") @@ -469,7 +469,7 @@ def test_best_point(self) -> None: adapter.gen( n=1, optimization_config=OptimizationConfig( - objective=Objective(metric=Metric("a"), minimize=False), + objectives=[Objective(metric=Metric("a"), minimize=False)], outcome_constraints=[ ScalarizedOutcomeConstraint( metrics=[Metric("wrong_metric_name")], @@ -738,7 +738,7 @@ def test_convert_contextual_observations(self) -> None: ) # Make an optimization config that includes all metrics. 
opt_config = OptimizationConfig( - objective=Objective(metric=Metric("y"), minimize=True), + objectives=[Objective(metric=Metric("y"), minimize=True)], outcome_constraints=[ OutcomeConstraint( metric=Metric(f"y:c{i}"), op=ComparisonOp.GEQ, bound=0 @@ -1205,10 +1205,12 @@ def test_pairwise_preference_generator(self) -> None: surrogate=surrogate, ), optimization_config=OptimizationConfig( - Objective( - metric=Metric(Keys.PAIRWISE_PREFERENCE_QUERY.value), - minimize=False, - ) + objectives=[ + Objective( + metric=Metric(Keys.PAIRWISE_PREFERENCE_QUERY.value), + minimize=False, + ) + ] ), fit_tracking_metrics=False, ) diff --git a/ax/adapter/tests/test_torch_moo_adapter.py b/ax/adapter/tests/test_torch_moo_adapter.py index d929c3f15a7..7f8aebca3d5 100644 --- a/ax/adapter/tests/test_torch_moo_adapter.py +++ b/ax/adapter/tests/test_torch_moo_adapter.py @@ -333,7 +333,6 @@ def test_hypervolume(self, _, cuda: bool = False) -> None: ) for trial in exp.trials.values(): trial.mark_running(no_runner_required=True).mark_completed() - # pyre-fixme[16]: Optional type has no attribute `metrics`. metrics_dict = exp.metrics # Objective thresholds and synthetic observations chosen to have closed-form # hypervolumes to test. @@ -464,9 +463,15 @@ def test_infer_objective_thresholds(self, _, cuda: bool = False) -> None: first = sub_exprs[0] if not first.startswith("-"): sub_exprs[0] = f"-{first}" - oc.objective = Objective( - expression=", ".join(sub_exprs), - metric_name_to_signature={s.lstrip("-"): s.lstrip("-") for s in sub_exprs}, + oc = oc.clone_with_args( + objectives=[ + Objective( + expression=", ".join(sub_exprs), + metric_name_to_signature={ + s.lstrip("-"): s.lstrip("-") for s in sub_exprs + }, + ) + ] ) for use_partial_thresholds in (False, True): diff --git a/ax/adapter/transforms/relativize.py b/ax/adapter/transforms/relativize.py index d3fb5dd2376..4d81cb0369d 100644 --- a/ax/adapter/transforms/relativize.py +++ b/ax/adapter/transforms/relativize.py @@ -157,7 +157,7 @@ def transform_optimization_config( "Expected multi-objective, got single-objective" ) new_optimization_config = optimization_config.clone_with_args( - objective=objective, + objectives=[objective], outcome_constraints=constraints, ) elif isinstance(optimization_config, MultiObjectiveOptimizationConfig): @@ -174,13 +174,14 @@ def transform_optimization_config( ) new_optimization_config = optimization_config.clone_with_args( - objective=optimization_config.objective, + objectives=[optimization_config.objective], outcome_constraints=constraints, objective_thresholds=obj_thresholds, ) else: new_optimization_config = optimization_config.clone_with_args( - objective=optimization_config.objective, outcome_constraints=constraints + objectives=[optimization_config.objective], + outcome_constraints=constraints, ) return new_optimization_config diff --git a/ax/adapter/transforms/standardize_y.py b/ax/adapter/transforms/standardize_y.py index 2deaf462583..72513d44bd4 100644 --- a/ax/adapter/transforms/standardize_y.py +++ b/ax/adapter/transforms/standardize_y.py @@ -133,10 +133,12 @@ def transform_optimization_config( (name, new_w) for (name, _), new_w in zip(objective.metric_weights, new_weights) ] - optimization_config.objective = _build_objective_from_metric_weights( - new_metric_weights, - metric_name_to_signature=objective.metric_name_to_signature, - ) + optimization_config._objectives = [ + _build_objective_from_metric_weights( + new_metric_weights, + metric_name_to_signature=objective.metric_name_to_signature, + ) + ] new_constraints = 
self._transform_constraints( optimization_config.outcome_constraints, adapter diff --git a/ax/adapter/transforms/stratified_standardize_y.py b/ax/adapter/transforms/stratified_standardize_y.py index 9ae063c3507..15670fe7f49 100644 --- a/ax/adapter/transforms/stratified_standardize_y.py +++ b/ax/adapter/transforms/stratified_standardize_y.py @@ -196,10 +196,12 @@ def transform_optimization_config( (name, new_w) for (name, _), new_w in zip(objective.metric_weights, new_weights) ] - optimization_config.objective = _build_objective_from_metric_weights( - new_metric_weights, - metric_name_to_signature=objective.metric_name_to_signature, - ) + optimization_config._objectives = [ + _build_objective_from_metric_weights( + new_metric_weights, + metric_name_to_signature=objective.metric_name_to_signature, + ) + ] optimization_config.outcome_constraints = self._transform_constraints( optimization_config.outcome_constraints, strata, adapter diff --git a/ax/adapter/transforms/tests/test_base_transform.py b/ax/adapter/transforms/tests/test_base_transform.py index f4802aafe5e..446cb2bb89b 100644 --- a/ax/adapter/transforms/tests/test_base_transform.py +++ b/ax/adapter/transforms/tests/test_base_transform.py @@ -116,7 +116,7 @@ def test_transform_optimization_config_with_pruning_target_parameterization( # modifies parameters pruning_target_parameterization = Arm(parameters={"x1": 2.5, "x2": 7.5}) optimization_config = OptimizationConfig( - objective=Objective(metric=Metric("m1"), minimize=False), + objectives=[Objective(metric=Metric("m1"), minimize=False)], pruning_target_parameterization=pruning_target_parameterization, ) @@ -142,7 +142,7 @@ def test_transform_optimization_config_without_pruning_target_parameterization( ) -> None: # Setup: create optimization config without target arm optimization_config = OptimizationConfig( - objective=Objective(metric=Metric("m1"), minimize=False), + objectives=[Objective(metric=Metric("m1"), minimize=False)], pruning_target_parameterization=None, ) @@ -169,7 +169,7 @@ def test_transform_optimization_config_preserves_other_fields(self) -> None: ) ] optimization_config = OptimizationConfig( - objective=Objective(metric=Metric("m1"), minimize=True), + objectives=[Objective(metric=Metric("m1"), minimize=True)], outcome_constraints=outcome_constraints, pruning_target_parameterization=pruning_target_parameterization, ) diff --git a/ax/adapter/transforms/tests/test_derelativize_transform.py b/ax/adapter/transforms/tests/test_derelativize_transform.py index e756d8a31ec..d62bfa03a85 100644 --- a/ax/adapter/transforms/tests/test_derelativize_transform.py +++ b/ax/adapter/transforms/tests/test_derelativize_transform.py @@ -106,7 +106,7 @@ def _test_DerelativizeTransform( # Test with no relative constraints objective = Objective(metric=Metric("c"), minimize=True) oc = OptimizationConfig( - objective=objective, + objectives=[objective], outcome_constraints=[ OutcomeConstraint( metric=Metric("m1"), op=ComparisonOp.LEQ, bound=2, relative=False @@ -126,7 +126,7 @@ def _test_DerelativizeTransform( # Test with relative constraint, in-design status quo relative_bound = -10 oc = OptimizationConfig( - objective=objective, + objectives=[objective], outcome_constraints=[ OutcomeConstraint( metric=Metric("m1"), op=ComparisonOp.LEQ, bound=2, relative=False @@ -192,7 +192,7 @@ def _test_DerelativizeTransform( ) g = Adapter(experiment=experiment_2, generator=Generator()) oc = OptimizationConfig( - objective=objective, + objectives=[objective], outcome_constraints=[ OutcomeConstraint( 
metric=Metric("m1"), op=ComparisonOp.LEQ, bound=2, relative=False @@ -250,7 +250,7 @@ def _test_DerelativizeTransform( ) g = Adapter(experiment=experiment_3, generator=Generator()) oc = OptimizationConfig( - objective=objective, + objectives=[objective], outcome_constraints=[ OutcomeConstraint( metric=Metric("m1"), op=ComparisonOp.LEQ, bound=2, relative=False @@ -282,7 +282,7 @@ def _test_DerelativizeTransform( # Same for scalarized constraint only. oc_scalarized_only = OptimizationConfig( - objective=objective, + objectives=[objective], outcome_constraints=[ ScalarizedOutcomeConstraint( metrics=[Metric("m1"), Metric("m2")], @@ -319,7 +319,7 @@ def _test_DerelativizeTransform( def test_errors(self) -> None: t = Derelativize(search_space=None) oc = OptimizationConfig( - objective=Objective(metric=Metric("c"), minimize=False), + objectives=[Objective(metric=Metric("c"), minimize=False)], outcome_constraints=[ OutcomeConstraint( metric=Metric("m1"), op=ComparisonOp.LEQ, bound=2, relative=True diff --git a/ax/adapter/transforms/tests/test_log_y_transform.py b/ax/adapter/transforms/tests/test_log_y_transform.py index b35ab67ab31..7df1cad7fea 100644 --- a/ax/adapter/transforms/tests/test_log_y_transform.py +++ b/ax/adapter/transforms/tests/test_log_y_transform.py @@ -138,14 +138,14 @@ def test_TransformOptimizationConfig(self) -> None: # basic test m1 = Metric(name="m1") objective_m1 = Objective(metric=m1, minimize=False) - oc = OptimizationConfig(objective=objective_m1, outcome_constraints=[]) + oc = OptimizationConfig(objectives=[objective_m1], outcome_constraints=[]) tf = LogY(search_space=None, config={"metrics": ["m1"]}) oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None) self.assertEqual(oc_tf, oc) # output constraint on a different metric should work m2 = Metric(name="m2") oc = OptimizationConfig( - objective=objective_m1, + objectives=[objective_m1], outcome_constraints=[ get_outcome_constraint(metric=m2, bound=-1, relative=False) ], @@ -155,7 +155,7 @@ def test_TransformOptimizationConfig(self) -> None: # output constraint with a negative bound should fail objective_m2 = Objective(metric=m2, minimize=False) oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], outcome_constraints=[ get_outcome_constraint(metric=m1, bound=-1.234, relative=False) ], @@ -170,7 +170,7 @@ def test_TransformOptimizationConfig(self) -> None: ) # output constraint with a zero bound should also fail oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], outcome_constraints=[ get_outcome_constraint(metric=m1, bound=0, relative=False) ], @@ -185,7 +185,7 @@ def test_TransformOptimizationConfig(self) -> None: ) # output constraint with a positive bound should work oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], outcome_constraints=[ get_outcome_constraint(metric=m1, bound=2.345, relative=False) ], @@ -200,7 +200,7 @@ def test_TransformOptimizationConfig(self) -> None: self.assertEqual(oc_tf, oc) # output constraint with a relative bound should fail oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], outcome_constraints=[ get_outcome_constraint(metric=m1, bound=2.345, relative=True) ], @@ -228,7 +228,7 @@ def test_TransformOptimizationConfigMOO(self) -> None: ObjectiveThreshold(metric=m2, bound=3.456, relative=False), ] oc = MultiObjectiveOptimizationConfig( - objective=mo, + objectives=[mo], objective_thresholds=objective_thresholds, ) tf = LogY(search_space=None, config={"metrics": ["m1"]}) 
diff --git a/ax/adapter/transforms/tests/test_objective_as_constraint.py b/ax/adapter/transforms/tests/test_objective_as_constraint.py index 37a70dddc93..3a5c2d16fa9 100644 --- a/ax/adapter/transforms/tests/test_objective_as_constraint.py +++ b/ax/adapter/transforms/tests/test_objective_as_constraint.py @@ -63,9 +63,11 @@ def _make_experiment_adapter_and_data( Tuple of (experiment, adapter, experiment_data). """ optimization_config = OptimizationConfig( - objective=Objective( - metric=Metric("m1", lower_is_better=minimize), minimize=minimize - ), + objectives=[ + Objective( + metric=Metric("m1", lower_is_better=minimize), minimize=minimize + ) + ], outcome_constraints=[ OutcomeConstraint( metric=Metric("m2", lower_is_better=True), @@ -187,9 +189,9 @@ def test_adds_leq_constraint_when_minimizing(self) -> None: def test_no_op_without_status_quo(self) -> None: """Test that the transform is a no-op without a status quo.""" optimization_config = OptimizationConfig( - objective=Objective( - metric=Metric("m1", lower_is_better=False), minimize=False - ), + objectives=[ + Objective(metric=Metric("m1", lower_is_better=False), minimize=False) + ], outcome_constraints=[ OutcomeConstraint( metric=Metric("m2", lower_is_better=True), @@ -220,9 +222,9 @@ def test_no_op_without_status_quo(self) -> None: def test_no_op_without_constraints(self) -> None: """Test that the transform is a no-op when there are no constraints.""" optimization_config = OptimizationConfig( - objective=Objective( - metric=Metric("m1", lower_is_better=False), minimize=False - ), + objectives=[ + Objective(metric=Metric("m1", lower_is_better=False), minimize=False) + ], ) search_space = SearchSpace( parameters=[ @@ -437,7 +439,7 @@ def _make_scalarized_experiment_adapter_and_data( ) else: optimization_config = OptimizationConfig( - objective=scalarized_objective, + objectives=[scalarized_objective], outcome_constraints=outcome_constraints, ) diff --git a/ax/adapter/transforms/tests/test_power_y_transform.py b/ax/adapter/transforms/tests/test_power_y_transform.py index a6f967cfc3c..224b8093c16 100644 --- a/ax/adapter/transforms/tests/test_power_y_transform.py +++ b/ax/adapter/transforms/tests/test_power_y_transform.py @@ -207,7 +207,7 @@ def test_transform_optimization_config(self) -> None: # basic test m1 = Metric(name="m1") objective_m1 = Objective(metric=m1, minimize=False) - oc = OptimizationConfig(objective=objective_m1, outcome_constraints=[]) + oc = OptimizationConfig(objectives=[objective_m1], outcome_constraints=[]) tf = PowerTransformY( search_space=None, experiment_data=self.experiment_data, @@ -219,7 +219,7 @@ def test_transform_optimization_config(self) -> None: m2 = Metric(name="m2") for bound in [-1.234, 0, 2.345]: oc = OptimizationConfig( - objective=objective_m1, + objectives=[objective_m1], outcome_constraints=get_constraint( metric=m2, bound=bound, relative=False ), @@ -230,7 +230,7 @@ def test_transform_optimization_config(self) -> None: objective_m2 = Objective(metric=m2, minimize=False) for bound in [-1.234, 0, 2.345]: oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], outcome_constraints=get_constraint( metric=m1, bound=bound, relative=False ), @@ -259,7 +259,7 @@ def test_transform_optimization_config(self) -> None: self.assertAlmostEqual(c_actual.bound, c_expected.bound, places=5) # Relative constraints aren't supported oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], outcome_constraints=get_constraint(metric=m1, bound=2.345, relative=True), ) with 
self.assertRaisesRegex( @@ -277,7 +277,7 @@ def test_transform_optimization_config(self) -> None: # Support for scalarized outcome constraints isn't implemented m3 = Metric(name="m3") oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], outcome_constraints=[ ScalarizedOutcomeConstraint( metrics=[m1, m3], op=ComparisonOp.GEQ, bound=2.345, relative=False @@ -295,7 +295,9 @@ def test_transform_optimization_config(self) -> None: scalarized_objective = ScalarizedObjective( metrics=[m1, m3], weights=[1.0, 2.0], minimize=False ) - oc = OptimizationConfig(objective=scalarized_objective, outcome_constraints=[]) + oc = OptimizationConfig( + objectives=[scalarized_objective], outcome_constraints=[] + ) with self.assertRaisesRegex(NotImplementedError, "ScalarizedObjective"): tf.transform_optimization_config(oc, None, None) diff --git a/ax/adapter/transforms/tests/test_standardize_y_transform.py b/ax/adapter/transforms/tests/test_standardize_y_transform.py index 6985b589328..92828b7b9ed 100644 --- a/ax/adapter/transforms/tests/test_standardize_y_transform.py +++ b/ax/adapter/transforms/tests/test_standardize_y_transform.py @@ -111,7 +111,7 @@ def test_TransformOptimizationConfig(self) -> None: ), ] - oc = OptimizationConfig(objective=objective, outcome_constraints=cons) + oc = OptimizationConfig(objectives=[objective], outcome_constraints=cons) with self.assertRaisesRegex( DataRequiredError, "`StandardizeY` transform requires constraint metric" ): @@ -125,7 +125,7 @@ def test_TransformOptimizationConfig(self) -> None: relative=False, ), ] - oc = OptimizationConfig(objective=objective, outcome_constraints=cons) + oc = OptimizationConfig(objectives=[objective], outcome_constraints=cons) with self.assertRaisesRegex( DataRequiredError, "`StandardizeY` transform requires constraint metric" ): @@ -145,7 +145,7 @@ def test_TransformOptimizationConfig(self) -> None: relative=False, ), ] - oc = OptimizationConfig(objective=objective, outcome_constraints=cons) + oc = OptimizationConfig(objectives=[objective], outcome_constraints=cons) oc = self.t.transform_optimization_config(oc, None, None) # Verify the transformed constraints have the expected values. 
# We compare properties individually to avoid floating-point string @@ -174,7 +174,7 @@ def test_TransformOptimizationConfig(self) -> None: con = OutcomeConstraint( metric=m1, op=ComparisonOp.GEQ, bound=2.0, relative=True ) - oc = OptimizationConfig(objective=objective, outcome_constraints=[con]) + oc = OptimizationConfig(objectives=[objective], outcome_constraints=[con]) with self.assertRaises(ValueError): oc = self.t.transform_optimization_config(oc, None, None) @@ -229,7 +229,7 @@ def test_TransformOptimizationConfigWithScalarizedObjective(self) -> None: objective = ScalarizedObjective( metrics=[m1, m2], weights=[0.5, 0.5], minimize=False ) - oc = OptimizationConfig(objective=objective) + oc = OptimizationConfig(objectives=[objective]) oc_transformed = self.t.transform_optimization_config(oc, None, None) # Check that weights are scaled by standard deviations @@ -245,7 +245,7 @@ def test_TransformOptimizationConfigWithScalarizedObjective(self) -> None: objective_missing = ScalarizedObjective( metrics=[m1, m3], weights=[0.5, 0.5], minimize=False ) - oc_missing = OptimizationConfig(objective=objective_missing) + oc_missing = OptimizationConfig(objectives=[objective_missing]) with self.assertRaisesRegex( DataRequiredError, "`StandardizeY` transform requires objective metric" ): @@ -255,7 +255,7 @@ def test_TransformOptimizationConfigWithScalarizedObjective(self) -> None: objective_minimize = ScalarizedObjective( metrics=[m1, m2], weights=[1.0, -2.0], minimize=True ) - oc_minimize = OptimizationConfig(objective=objective_minimize) + oc_minimize = OptimizationConfig(objectives=[objective_minimize]) oc_minimize_transformed = self.t.transform_optimization_config( oc_minimize, None, None ) diff --git a/ax/adapter/transforms/tests/test_stratified_standardize_y_transform.py b/ax/adapter/transforms/tests/test_stratified_standardize_y_transform.py index 52b1f1169bc..cd1c9235531 100644 --- a/ax/adapter/transforms/tests/test_stratified_standardize_y_transform.py +++ b/ax/adapter/transforms/tests/test_stratified_standardize_y_transform.py @@ -314,7 +314,7 @@ def test_TransformObservations(self) -> None: def test_TransformOptimizationConfig(self) -> None: cons2 = deepcopy(self.cons) - oc = OptimizationConfig(objective=self.objective, outcome_constraints=cons2) + oc = OptimizationConfig(objectives=[self.objective], outcome_constraints=cons2) fixed_features = ObservationFeatures({"z": "a"}) oc = self.t.transform_optimization_config(oc, None, fixed_features) # Verify constraint values approximately (expression-string equality @@ -330,7 +330,7 @@ def test_TransformOptimizationConfig(self) -> None: self.assertTrue(oc.objective == self.objective) # No constraints - oc2 = OptimizationConfig(objective=self.objective) + oc2 = OptimizationConfig(objectives=[self.objective]) oc3 = deepcopy(oc2) fixed_features = ObservationFeatures({"z": "a"}) oc3 = self.t.transform_optimization_config(oc3, None, fixed_features) @@ -340,7 +340,7 @@ def test_TransformOptimizationConfig(self) -> None: con = OutcomeConstraint( metric=self.m1, op=ComparisonOp.GEQ, bound=2.0, relative=True ) - oc = OptimizationConfig(objective=self.objective, outcome_constraints=[con]) + oc = OptimizationConfig(objectives=[self.objective], outcome_constraints=[con]) with self.assertRaises(ValueError): oc = self.t.transform_optimization_config(oc, None, fixed_features) # Fail without strat param fixed @@ -350,10 +350,10 @@ def test_TransformOptimizationConfig(self) -> None: def test_TransformOptimizationConfigWithStrataMapping(self) -> None: cons2 = 
deepcopy(self.cons) - oc = OptimizationConfig(objective=self.objective, outcome_constraints=cons2) + oc = OptimizationConfig(objectives=[self.objective], outcome_constraints=cons2) fixed_features = ObservationFeatures({"z": "a"}) cons2 = deepcopy(self.cons) - oc = OptimizationConfig(objective=self.objective, outcome_constraints=cons2) + oc = OptimizationConfig(objectives=[self.objective], outcome_constraints=cons2) oc = self.t2.transform_optimization_config(oc, None, fixed_features) # Verify constraint values approximately. self.assertEqual(len(oc.outcome_constraints), 2) @@ -367,7 +367,7 @@ def test_TransformOptimizationConfigWithStrataMapping(self) -> None: self.assertTrue(oc.objective == self.objective) fixed_features = ObservationFeatures({"z": "c"}) cons2 = deepcopy(self.cons) - oc = OptimizationConfig(objective=self.objective, outcome_constraints=cons2) + oc = OptimizationConfig(objectives=[self.objective], outcome_constraints=cons2) oc = self.t2.transform_optimization_config(oc, None, fixed_features) # Verify constraint values approximately. self.assertEqual(len(oc.outcome_constraints), 2) @@ -441,7 +441,7 @@ def test_TransformOptimizationConfigWithScalarizedObjective(self) -> None: objective = ScalarizedObjective( metrics=[self.m1, self.m2], weights=[0.5, 0.5], minimize=False ) - oc = OptimizationConfig(objective=objective) + oc = OptimizationConfig(objectives=[objective]) expected_weights = { "a": [0.5 * 1.0, 0.5 * sqrt(2) * 3], "b": [0.5 * 2 * sqrt(2), 0.5 * sqrt(2) * 0.5], @@ -476,7 +476,7 @@ def test_TransformAndUntransformScalarizedOutcomeConstraint(self) -> None: relative=False, ) oc = OptimizationConfig( - objective=self.objective, outcome_constraints=[scalarized_constraint] + objectives=[self.objective], outcome_constraints=[scalarized_constraint] ) fixed_features = ObservationFeatures({"z": "a"}) oc_transformed = self.t.transform_optimization_config(oc, None, fixed_features) diff --git a/ax/adapter/transforms/tests/test_winsorize_transform.py b/ax/adapter/transforms/tests/test_winsorize_transform.py index e67dff8b1c2..051273a7cad 100644 --- a/ax/adapter/transforms/tests/test_winsorize_transform.py +++ b/ax/adapter/transforms/tests/test_winsorize_transform.py @@ -350,11 +350,13 @@ def test_winsorization_with_optimization_config(self) -> None: # Scalarized objective (deprecated class) for minimize in [True, False]: experiment.optimization_config = OptimizationConfig( - objective=ScalarizedObjective( - metrics=[Metric(name="m1"), Metric(name="m2")], - weights=[1, -1], - minimize=minimize, - ) + objectives=[ + ScalarizedObjective( + metrics=[Metric(name="m1"), Metric(name="m2")], + weights=[1, -1], + minimize=minimize, + ) + ] ) adapter = Adapter(experiment=experiment, generator=Generator()) transform = Winsorize(experiment_data=experiment_data, adapter=adapter) @@ -372,9 +374,12 @@ def test_winsorization_with_optimization_config(self) -> None: # ScalarizedObjective(metrics=[m1, m2], weights=[1, -1], minimize=False) # since expressions are always maximized. 
experiment.optimization_config = OptimizationConfig( - objective=Objective( - expression="m1 - m2", metric_name_to_signature={"m1": "m1", "m2": "m2"} - ) + objectives=[ + Objective( + expression="m1 - m2", + metric_name_to_signature={"m1": "m1", "m2": "m2"}, + ) + ] ) adapter = Adapter(experiment=experiment, generator=Generator()) transform = Winsorize(experiment_data=experiment_data, adapter=adapter) @@ -384,9 +389,12 @@ def test_winsorization_with_optimization_config(self) -> None: ) # Negated expression "-m1 + m2" is equivalent to minimize=True above. experiment.optimization_config = OptimizationConfig( - objective=Objective( - expression="-m1 + m2", metric_name_to_signature={"m1": "m1", "m2": "m2"} - ) + objectives=[ + Objective( + expression="-m1 + m2", + metric_name_to_signature={"m1": "m1", "m2": "m2"}, + ) + ] ) adapter = Adapter(experiment=experiment, generator=Generator()) transform = Winsorize(experiment_data=experiment_data, adapter=adapter) @@ -399,7 +407,7 @@ def test_winsorization_with_optimization_config(self) -> None: m2 = Metric(name="m2", lower_is_better=True) m3 = Metric(name="m3") experiment.optimization_config = OptimizationConfig( - objective=Objective(metric=m2, minimize=True) + objectives=[Objective(metric=m2, minimize=True)] ) adapter = Adapter(experiment=experiment, generator=Generator()) transform = Winsorize(experiment_data=experiment_data, adapter=adapter) @@ -411,7 +419,7 @@ def test_winsorization_with_optimization_config(self) -> None: metric=m1, op=ComparisonOp.LEQ, bound=3, relative=True ) experiment.optimization_config = OptimizationConfig( - objective=Objective(metric=m2, minimize=True), + objectives=[Objective(metric=m2, minimize=True)], outcome_constraints=[outcome_constraint], ) adapter = Adapter(experiment=experiment, generator=Generator()) @@ -472,7 +480,9 @@ def test_winsorization_with_optimization_config(self) -> None: moo_objective = MultiObjective( [Objective(metric=m1, minimize=False), Objective(metric=m2, minimize=True)] ) - optimization_config = MultiObjectiveOptimizationConfig(objective=moo_objective) + optimization_config = MultiObjectiveOptimizationConfig( + objectives=[moo_objective] + ) experiment._optimization_config = optimization_config adapter = Adapter(experiment=experiment, generator=Generator()) with warnings.catch_warnings(record=True) as ws: @@ -493,7 +503,7 @@ def test_winsorization_with_optimization_config(self) -> None: ObjectiveThreshold(m2, 4, relative=True), ] optimization_config = MultiObjectiveOptimizationConfig( - objective=moo_objective, + objectives=[moo_objective], objective_thresholds=objective_thresholds, outcome_constraints=[], ) @@ -536,7 +546,7 @@ def test_relative_constraints(self) -> None: ) # Test with relative constraint, in-design status quo oc = OptimizationConfig( - objective=Objective(metric=Metric("c"), minimize=False), + objectives=[Objective(metric=Metric("c"), minimize=False)], outcome_constraints=[ OutcomeConstraint( metric=Metric("a"), op=ComparisonOp.LEQ, bound=2, relative=False diff --git a/ax/analysis/healthcheck/tests/test_complexity_rating.py b/ax/analysis/healthcheck/tests/test_complexity_rating.py index e715f0d20f7..73b1fe1de7b 100644 --- a/ax/analysis/healthcheck/tests/test_complexity_rating.py +++ b/ax/analysis/healthcheck/tests/test_complexity_rating.py @@ -103,11 +103,13 @@ def test_objectives_count(self) -> None: if m.name not in self.experiment.metrics: self.experiment.add_tracking_metric(m) self.experiment._optimization_config = MultiObjectiveOptimizationConfig( - 
objective=MultiObjective( - objectives=[ - Objective(metric=m, minimize=False) for m in metrics - ] - ) + objectives=[ + MultiObjective( + objectives=[ + Objective(metric=m, minimize=False) for m in metrics + ] + ) + ] ) card = ComplexityRatingAnalysis( options=self.options, tier_metadata=self.tier_metadata @@ -125,7 +127,7 @@ def test_constraints(self) -> None: if m.name not in self.experiment.metrics: self.experiment.add_tracking_metric(m) self.experiment._optimization_config = OptimizationConfig( - objective=Objective(metric=Metric(name="obj"), minimize=False), + objectives=[Objective(metric=Metric(name="obj"), minimize=False)], outcome_constraints=[ OutcomeConstraint(metric=m, op=ComparisonOp.LEQ, bound=1.0) for m in metrics diff --git a/ax/analysis/healthcheck/tests/test_constraints_feasibility.py b/ax/analysis/healthcheck/tests/test_constraints_feasibility.py index b085c1e0249..c169bd12a11 100644 --- a/ax/analysis/healthcheck/tests/test_constraints_feasibility.py +++ b/ax/analysis/healthcheck/tests/test_constraints_feasibility.py @@ -195,7 +195,7 @@ def test_no_constraints(self) -> None: branin_d_means=[1.0, 1.0, 2.0, 3.0, 4.0, 5.0] ) experiment.optimization_config = OptimizationConfig( - objective=Objective(metric=Metric(name="branin_a"), minimize=False), + objectives=[Objective(metric=Metric(name="branin_a"), minimize=False)], outcome_constraints=[], ) @@ -275,7 +275,7 @@ def test_validate_applicable_state(self) -> None: # (should be valid) experiment_no_constraints = get_branin_experiment(with_status_quo=True) experiment_no_constraints.optimization_config = OptimizationConfig( - objective=Objective(metric=Metric(name="branin"), minimize=True), + objectives=[Objective(metric=Metric(name="branin"), minimize=True)], outcome_constraints=[], ) validation_error = icfa.validate_applicable_state( diff --git a/ax/analysis/healthcheck/tests/test_early_stopping_healthcheck.py b/ax/analysis/healthcheck/tests/test_early_stopping_healthcheck.py index bae7cc8b37a..6685d0c8917 100644 --- a/ax/analysis/healthcheck/tests/test_early_stopping_healthcheck.py +++ b/ax/analysis/healthcheck/tests/test_early_stopping_healthcheck.py @@ -320,9 +320,14 @@ def test_get_problem_type_via_disabled_config(self) -> None: metric1 = get_branin_metric(name="m1") metric2 = get_branin_metric(name="m2") experiment._optimization_config = MultiObjectiveOptimizationConfig( - objective=MultiObjective( - objectives=[Objective(metric=metric1), Objective(metric=metric2)] - ) + objectives=[ + MultiObjective( + objectives=[ + Objective(metric=metric1), + Objective(metric=metric2), + ] + ) + ] ) card = healthcheck.compute(experiment=experiment) df_dict = self._get_df_dict(card) @@ -333,7 +338,7 @@ def test_get_problem_type_via_disabled_config(self) -> None: metric = get_branin_metric(name="m1") constraint_metric = get_branin_metric(name="constraint_metric") experiment._optimization_config = OptimizationConfig( - objective=Objective(metric=metric), + objectives=[Objective(metric=metric)], outcome_constraints=[ OutcomeConstraint( metric=constraint_metric, op=ComparisonOp.LEQ, bound=10.0 diff --git a/ax/analysis/plotly/tests/test_bandit_rollout.py b/ax/analysis/plotly/tests/test_bandit_rollout.py index 0b56f12de6e..2647cd4bb79 100644 --- a/ax/analysis/plotly/tests/test_bandit_rollout.py +++ b/ax/analysis/plotly/tests/test_bandit_rollout.py @@ -95,7 +95,7 @@ def test_stale_failed_trial_filtering(self) -> None: ] ), optimization_config=OptimizationConfig( - objective=Objective(metric=Metric(name="foo"), minimize=False) + 
objectives=[Objective(metric=Metric(name="foo"), minimize=False)] ), ) diff --git a/ax/analysis/plotly/tests/test_marginal_effects.py b/ax/analysis/plotly/tests/test_marginal_effects.py index 57970e98f3d..83a24c78370 100644 --- a/ax/analysis/plotly/tests/test_marginal_effects.py +++ b/ax/analysis/plotly/tests/test_marginal_effects.py @@ -32,7 +32,7 @@ def setUp(self) -> None: name="test_experiment", is_test=True, optimization_config=OptimizationConfig( - objective=Objective(metric=Metric("metric_1", lower_is_better=False)) + objectives=[Objective(metric=Metric("metric_1", lower_is_better=False))] ), ) num_arms = 3 diff --git a/ax/analysis/plotly/tests/test_objective_p_feasible_frontier.py b/ax/analysis/plotly/tests/test_objective_p_feasible_frontier.py index 42e67c0e945..1f4d1343e9b 100644 --- a/ax/analysis/plotly/tests/test_objective_p_feasible_frontier.py +++ b/ax/analysis/plotly/tests/test_objective_p_feasible_frontier.py @@ -36,7 +36,7 @@ def setUp(self) -> None: with_completed_batch=True, with_absolute_constraint=True, num_objectives=3 ) self.experiment.optimization_config = OptimizationConfig( - objective=Objective(metric=self.experiment.metrics["branin_a"]), + objectives=[Objective(metric=self.experiment.metrics["branin_a"])], outcome_constraints=self.experiment.optimization_config.outcome_constraints, ) opt_config = none_throws(self.experiment.optimization_config) @@ -171,11 +171,13 @@ def test_validate_applicable_state(self) -> None: ) self.experiment.optimization_config = MultiObjectiveOptimizationConfig( - objective=MultiObjective( - objectives=[ - Objective(metric=m) for m in self.experiment.metrics.values() - ] - ) + objectives=[ + MultiObjective( + objectives=[ + Objective(metric=m) for m in self.experiment.metrics.values() + ] + ) + ] ) self.assertIn( @@ -218,13 +220,15 @@ def test_validate_applicable_state(self) -> None: def test_scalarized_objective_raises(self) -> None: """Scalarized objectives should be rejected in validate_applicable_state.""" self.experiment.optimization_config = OptimizationConfig( - objective=Objective( - expression="2*branin_a + -1*branin_b", - metric_name_to_signature={ - "branin_a": "branin_a", - "branin_b": "branin_b", - }, - ), + objectives=[ + Objective( + expression="2*branin_a + -1*branin_b", + metric_name_to_signature={ + "branin_a": "branin_a", + "branin_b": "branin_b", + }, + ) + ], outcome_constraints=none_throws( self.experiment.optimization_config ).outcome_constraints, diff --git a/ax/analysis/tests/test_diagnostics.py b/ax/analysis/tests/test_diagnostics.py index 4a4b4fcbe8f..c4e6fac8964 100644 --- a/ax/analysis/tests/test_diagnostics.py +++ b/ax/analysis/tests/test_diagnostics.py @@ -182,10 +182,12 @@ def test_compute_bandit(self) -> None: ] ), optimization_config=OptimizationConfig( - objective=Objective( - metric=Metric(name="booth"), - minimize=True, - ) + objectives=[ + Objective( + metric=Metric(name="booth"), + minimize=True, + ) + ] ), ) diff --git a/ax/analysis/tests/test_overview.py b/ax/analysis/tests/test_overview.py index 51837405151..4e40f48a6c4 100644 --- a/ax/analysis/tests/test_overview.py +++ b/ax/analysis/tests/test_overview.py @@ -199,10 +199,12 @@ def test_bandit_experiment_dispatch(self) -> None: ] ), optimization_config=OptimizationConfig( - objective=Objective( - metric=Metric(name="booth"), - minimize=True, - ) + objectives=[ + Objective( + metric=Metric(name="booth"), + minimize=True, + ) + ] ), ) diff --git a/ax/analysis/tests/test_results.py b/ax/analysis/tests/test_results.py index 5282bc5736f..5755ad047eb 
100644 --- a/ax/analysis/tests/test_results.py +++ b/ax/analysis/tests/test_results.py @@ -374,7 +374,7 @@ def test_compute_with_bandit_experiment(self) -> None: ] ), optimization_config=OptimizationConfig( - objective=Objective(metric=Metric(name="foo"), minimize=True) + objectives=[Objective(metric=Metric(name="foo"), minimize=True)] ), ) diff --git a/ax/api/tests/test_client.py b/ax/api/tests/test_client.py index cc3dceb882d..7ac4539cfe2 100644 --- a/ax/api/tests/test_client.py +++ b/ax/api/tests/test_client.py @@ -165,9 +165,11 @@ def test_configure_optimization(self) -> None: self.assertEqual( client._experiment.optimization_config, OptimizationConfig( - objective=Objective( - metric=MapMetric(name="ne", lower_is_better=True), minimize=True - ), + objectives=[ + Objective( + metric=MapMetric(name="ne", lower_is_better=True), minimize=True + ) + ], outcome_constraints=[ OutcomeConstraint( metric=MapMetric(name="qps"), diff --git a/ax/api/utils/instantiation/from_string.py b/ax/api/utils/instantiation/from_string.py index 5b2ebfb787b..9989214f67d 100644 --- a/ax/api/utils/instantiation/from_string.py +++ b/ax/api/utils/instantiation/from_string.py @@ -81,7 +81,7 @@ def optimization_config_from_string( true_outcome_constraints.append(outcome_constraint) return MultiObjectiveOptimizationConfig( - objective=objective, + objectives=[objective], outcome_constraints=true_outcome_constraints, objective_thresholds=objective_thresholds, ) @@ -97,6 +97,6 @@ def optimization_config_from_string( ) return OptimizationConfig( - objective=objective, + objectives=[objective], outcome_constraints=outcome_constraints, ) diff --git a/ax/api/utils/instantiation/tests/test_from_string.py b/ax/api/utils/instantiation/tests/test_from_string.py index a2fa27c55d8..f4de14020f4 100644 --- a/ax/api/utils/instantiation/tests/test_from_string.py +++ b/ax/api/utils/instantiation/tests/test_from_string.py @@ -21,9 +21,9 @@ def test_optimization_config_from_string(self) -> None: self.assertEqual( only_objective, OptimizationConfig( - objective=Objective( - expression="ne", metric_name_to_signature={"ne": "ne"} - ), + objectives=[ + Objective(expression="ne", metric_name_to_signature={"ne": "ne"}) + ], ), ) @@ -33,9 +33,9 @@ def test_optimization_config_from_string(self) -> None: self.assertEqual( with_constraints, OptimizationConfig( - objective=Objective( - expression="ne", metric_name_to_signature={"ne": "ne"} - ), + objectives=[ + Objective(expression="ne", metric_name_to_signature={"ne": "ne"}) + ], outcome_constraints=[ OutcomeConstraint( expression="qps >= 0", metric_name_to_signature={"qps": "qps"} @@ -51,10 +51,12 @@ def test_optimization_config_from_string(self) -> None: self.assertEqual( with_constraints_and_objective_threshold, MultiObjectiveOptimizationConfig( - objective=Objective( - expression="-ne, qps", - metric_name_to_signature={"ne": "ne", "qps": "qps"}, - ), + objectives=[ + Objective( + expression="-ne, qps", + metric_name_to_signature={"ne": "ne", "qps": "qps"}, + ) + ], outcome_constraints=[ OutcomeConstraint( expression="flops <= 1000000", diff --git a/ax/benchmark/benchmark_problem.py b/ax/benchmark/benchmark_problem.py index 2e07f1aa279..6f0253f54c5 100644 --- a/ax/benchmark/benchmark_problem.py +++ b/ax/benchmark/benchmark_problem.py @@ -334,7 +334,7 @@ def get_soo_opt_config( ) config = OptimizationConfig( - objective=objective, outcome_constraints=outcome_constraints + objectives=[objective], outcome_constraints=outcome_constraints ) return config, [obj_metric] + constraint_metrics @@ 
-422,7 +422,7 @@ def get_moo_opt_config( ) optimization_config = MultiObjectiveOptimizationConfig( - objective=objective, + objectives=[objective], objective_thresholds=objective_thresholds, outcome_constraints=outcome_constraints, ) diff --git a/ax/benchmark/problems/surrogate/hss/cifar10_surrogate.py b/ax/benchmark/problems/surrogate/hss/cifar10_surrogate.py index 3d5928abf3d..04364dc78d8 100644 --- a/ax/benchmark/problems/surrogate/hss/cifar10_surrogate.py +++ b/ax/benchmark/problems/surrogate/hss/cifar10_surrogate.py @@ -182,14 +182,16 @@ def get_cifar10_surrogate_benchmark( ) optimization_config = OptimizationConfig( - objective=Objective( - metric=BenchmarkMetric( - name="CIFAR10 Test Accuracy", - lower_is_better=False, - observe_noise_sd=False, - ), - minimize=False, - ) + objectives=[ + Objective( + metric=BenchmarkMetric( + name="CIFAR10 Test Accuracy", + lower_is_better=False, + observe_noise_sd=False, + ), + minimize=False, + ) + ], ) return BenchmarkProblem( diff --git a/ax/benchmark/problems/surrogate/hss/fashion_mnist_surrogate.py b/ax/benchmark/problems/surrogate/hss/fashion_mnist_surrogate.py index f2efc48b65b..3a20512a210 100644 --- a/ax/benchmark/problems/surrogate/hss/fashion_mnist_surrogate.py +++ b/ax/benchmark/problems/surrogate/hss/fashion_mnist_surrogate.py @@ -182,14 +182,16 @@ def get_fashion_mnist_surrogate_benchmark( ) optimization_config = OptimizationConfig( - objective=Objective( - metric=BenchmarkMetric( - name="FashionMNIST Test Accuracy", - lower_is_better=False, - observe_noise_sd=False, - ), - minimize=False, - ) + objectives=[ + Objective( + metric=BenchmarkMetric( + name="FashionMNIST Test Accuracy", + lower_is_better=False, + observe_noise_sd=False, + ), + minimize=False, + ) + ], ) return BenchmarkProblem( diff --git a/ax/benchmark/problems/surrogate/hss/mnist_surrogate.py b/ax/benchmark/problems/surrogate/hss/mnist_surrogate.py index 7235ed9c3d7..52c9131cb6a 100644 --- a/ax/benchmark/problems/surrogate/hss/mnist_surrogate.py +++ b/ax/benchmark/problems/surrogate/hss/mnist_surrogate.py @@ -150,14 +150,16 @@ def get_mnist_surrogate_benchmark( ) optimization_config = OptimizationConfig( - objective=Objective( - metric=BenchmarkMetric( - name="MNIST Test Accuracy", - lower_is_better=False, - observe_noise_sd=False, - ), - minimize=False, - ) + objectives=[ + Objective( + metric=BenchmarkMetric( + name="MNIST Test Accuracy", + lower_is_better=False, + observe_noise_sd=False, + ), + minimize=False, + ) + ], ) return BenchmarkProblem( diff --git a/ax/benchmark/tests/test_benchmark_problem.py b/ax/benchmark/tests/test_benchmark_problem.py index 13b47fb28a4..9999f02ba3e 100644 --- a/ax/benchmark/tests/test_benchmark_problem.py +++ b/ax/benchmark/tests/test_benchmark_problem.py @@ -36,10 +36,12 @@ def test_mismatch_of_names_on_test_function_and_opt_config_raises(self) -> None: botorch_problem=Branin(), outcome_names=["Branin"] ) opt_config = MultiObjectiveOptimizationConfig( - objective=Objective( - expression="-Branin, -Currin", - metric_name_to_signature={"Branin": "Branin", "Currin": "Currin"}, - ), + objectives=[ + Objective( + expression="-Branin, -Currin", + metric_name_to_signature={"Branin": "Branin", "Currin": "Currin"}, + ) + ], objective_thresholds=[ OutcomeConstraint( expression="Branin <= 0.0", @@ -68,9 +70,11 @@ def test_mismatch_of_names_on_test_function_and_opt_config_raises(self) -> None: ) opt_config2 = OptimizationConfig( - objective=Objective( - expression="-Branin", metric_name_to_signature={"Branin": "Branin"} - ), + objectives=[ + 
Objective( + expression="-Branin", metric_name_to_signature={"Branin": "Branin"} + ) + ], outcome_constraints=[ OutcomeConstraint( expression="c <= 0.0", metric_name_to_signature={"c": "c"} @@ -106,7 +110,7 @@ def test_missing_names_on_test_function_with_scalarized_objective(self) -> None: botorch_problem=BraninCurrin(), outcome_names=["BraninCurrin_0", "BraninCurrin_1"], ) - opt_config = OptimizationConfig(objective=objective) + opt_config = OptimizationConfig(objectives=[objective]) with self.assertRaisesRegex( ValueError, "The following objectives are defined on " diff --git a/ax/core/optimization_config.py b/ax/core/optimization_config.py index c2289916da1..dd47532f1e2 100644 --- a/ax/core/optimization_config.py +++ b/ax/core/optimization_config.py @@ -8,6 +8,7 @@ from __future__ import annotations +import warnings from collections.abc import Mapping from itertools import groupby from typing import Self @@ -15,8 +16,9 @@ from ax.core.arm import Arm from ax.core.objective import Objective from ax.core.outcome_constraint import ComparisonOp, OutcomeConstraint -from ax.exceptions.core import UserInputError +from ax.exceptions.core import UnsupportedError, UserInputError from ax.utils.common.base import Base +from pyre_extensions import none_throws TRefPoint = list[OutcomeConstraint] @@ -39,9 +41,12 @@ class OptimizationConfig(Base): - """An optimization configuration, which comprises an objective + """An optimization configuration, which comprises one or more objectives and outcome constraints. + For single-objective optimization, pass a single ``objective``. + For multi-objective optimization, pass a list of ``objectives``. + There is no minimum or maximum number of outcome constraints, but an individual metric can have at most two constraints--which is how we represent metrics with both upper and lower bounds. @@ -49,14 +54,20 @@ class OptimizationConfig(Base): def __init__( self, - objective: Objective, + *, + objective: Objective | None = None, + objectives: list[Objective] | None = None, outcome_constraints: list[OutcomeConstraint] | None = None, pruning_target_parameterization: Arm | None = None, ) -> None: """Inits OptimizationConfig. Args: - objective: Metric+direction to use for the optimization. + objective: Metric+direction to use for the optimization. Mutually + exclusive with ``objectives``. + objectives: List of objectives for multi-objective optimization. + Mutually exclusive with ``objective``. Each element must be a + single or scalarized Objective (not multi-objective). outcome_constraints: Constraints on metrics. pruning_target_parameterization: Arm containing the target values for irrelevant parameters. The target values are used to prune irrelevant @@ -70,14 +81,35 @@ def __init__( consideration, and if not, the parameter value will be replaced with the corresponding value in the target arm. """ + if objective is not None: + warnings.warn( + "Passing `objective` to OptimizationConfig is deprecated. " + "Use `objectives=[objective]` instead.", + DeprecationWarning, + stacklevel=2, + ) + if objective is not None and objectives is not None: + raise UserInputError( + "Cannot specify both `objective` and `objectives`. " + "Use `objectives=[objective]` instead." 
+ ) + if objective is None and objectives is None: + raise UserInputError("Must specify either `objective` or `objectives`.") + + if objectives is not None: + if len(objectives) == 0: + raise UserInputError("`objectives` must not be empty.") + self._objectives: list[Objective] = objectives + else: + self._objectives = [none_throws(objective)] + constraints: list[OutcomeConstraint] = ( [] if outcome_constraints is None else outcome_constraints ) self._validate_transformed_optimization_config( - objective=objective, + objectives=self._objectives, outcome_constraints=constraints, ) - self._objective: Objective = objective self._outcome_constraints: list[OutcomeConstraint] = constraints self.pruning_target_parameterization = pruning_target_parameterization @@ -87,13 +119,24 @@ def clone(self) -> Self: def clone_with_args( self, - objective: Objective | None = None, + *, + objectives: list[Objective] | None = None, outcome_constraints: None | (list[OutcomeConstraint]) = _NO_OUTCOME_CONSTRAINTS, pruning_target_parameterization: Arm | None = _NO_PRUNING_TARGET_PARAMETERIZATION, ) -> Self: - """Make a copy of this optimization config.""" - objective = self.objective.clone() if objective is None else objective + """Make a copy of this optimization config. + + Args: + objectives: Replace with a list of objectives. + outcome_constraints: Replace outcome constraints. Pass ``None`` + to clear them. + pruning_target_parameterization: Replace the pruning target. + """ + if objectives is not None: + cloned_objectives = objectives + else: + cloned_objectives = [obj.clone() for obj in self._objectives] outcome_constraints = ( [constraint.clone() for constraint in self.outcome_constraints] if outcome_constraints is _NO_OUTCOME_CONSTRAINTS @@ -106,23 +149,35 @@ def clone_with_args( ) return self.__class__( - objective=objective, + objectives=cloned_objectives, outcome_constraints=outcome_constraints, pruning_target_parameterization=pruning_target_parameterization, ) + @property + def objectives(self) -> list[Objective]: + """Get the list of objectives. + + For single-objective optimization, this returns a single-element list. + For multi-objective optimization, this returns all objectives. + """ + return self._objectives + @property def objective(self) -> Objective: - """Get objective.""" - return self._objective + """Get the single objective. - @objective.setter - def objective(self, objective: Objective) -> None: - """Set objective if not present in outcome constraints.""" - self._validate_transformed_optimization_config( - objective, self.outcome_constraints - ) - self._objective = objective + For single-objective or scalarized-objective configs, returns the + objective. For multi-objective configs (multiple objectives in the + list), raises ``UnsupportedError`` -- use ``objectives`` instead. + """ + if len(self._objectives) > 1: + raise UnsupportedError( + "This OptimizationConfig has multiple objectives. " + "Use `objectives` to access the list of objectives, or " + "iterate over individual objectives." + ) + return self._objectives[0] @property def all_constraints(self) -> list[OutcomeConstraint]: @@ -136,17 +191,26 @@ def outcome_constraints(self) -> list[OutcomeConstraint]: @property def objective_thresholds(self) -> list[OutcomeConstraint]: - """Get objective thresholds.""" + """Get objective thresholds. + + Returns outcome constraints whose primary metric is an objective + metric.
+ """ + all_obj_metric_names: set[str] = set() + for obj in self._objectives: + all_obj_metric_names.update(obj.metric_names) return [ threshold for threshold in self.outcome_constraints - if threshold.metric_names[0] in self.objective.metric_names + if threshold.metric_names[0] in all_obj_metric_names ] @property def metric_names(self) -> set[str]: - """All metric names referenced by the objective and constraints.""" - names: set[str] = set(self.objective.metric_names) + """All metric names referenced by the objectives and constraints.""" + names: set[str] = set() + for obj in self._objectives: + names.update(obj.metric_names) for oc in self.all_constraints: names.update(oc.metric_names) return names @@ -154,10 +218,11 @@ def metric_names(self) -> set[str]: @property def metric_name_to_signature(self) -> dict[str, str]: """Aggregated mapping from all metric names to their canonical - signatures, across the objective and all constraints. + signatures, across all objectives and all constraints. """ mapping: dict[str, str] = {} - mapping.update(self.objective.metric_name_to_signature) + for obj in self._objectives: + mapping.update(obj.metric_name_to_signature) for constraint in self.all_constraints: mapping.update(constraint.metric_name_to_signature) return mapping @@ -165,22 +230,30 @@ def metric_name_to_signature(self) -> dict[str, str]: def update_metric_name_to_signature_mapping( self, mapping: Mapping[str, str] ) -> None: - """Set the metric name to signature mapping on the objective and all + """Set the metric name to signature mapping on all objectives and constraints. """ - self.objective.update_metric_name_to_signature_mapping(mapping) + for obj in self._objectives: + obj.update_metric_name_to_signature_mapping(mapping) for constraint in self.all_constraints: constraint.update_metric_name_to_signature_mapping(mapping) @property def metric_signatures(self) -> set[str]: - """All metric signatures referenced by the objective and constraints.""" + """All metric signatures referenced by the objectives and constraints.""" mapping = self.metric_name_to_signature return {mapping[name] for name in self.metric_names} @property def is_moo_problem(self) -> bool: - return self.objective is not None and self.objective.is_multi_objective + """Whether this is a multi-objective optimization problem. + + True when there are multiple objectives in the list, or when a single + objective is a (legacy) multi-objective expression. + """ + if len(self._objectives) > 1: + return True + return self._objectives[0].is_multi_objective @property def is_bope_problem(self) -> bool: @@ -195,37 +268,53 @@ def is_bope_problem(self) -> bool: @outcome_constraints.setter def outcome_constraints(self, outcome_constraints: list[OutcomeConstraint]) -> None: """Set outcome constraints if valid, else raise.""" - self._validate_transformed_optimization_config( - objective=self.objective, + unconstrainable: list[str] = [] + for obj in self._objectives: + unconstrainable.extend(obj.get_unconstrainable_metric_names()) + self._validate_outcome_constraints( + unconstrainable_metric_names=unconstrainable, outcome_constraints=outcome_constraints, ) self._outcome_constraints = outcome_constraints @staticmethod def _validate_transformed_optimization_config( - objective: Objective, + objectives: list[Objective], outcome_constraints: list[OutcomeConstraint] | None = None, ) -> None: - """Ensure outcome constraints are valid. + """Validate objectives and outcome constraints. 
- Either one or two outcome constraints can reference one metric. - If there are two constraints, they must have different 'ops': one - LEQ and one GEQ. - If there are two constraints, the bound of the GEQ op must be less - than the bound of the LEQ op. + Ensures no multi-objective expressions in individual objectives, + no duplicate metrics across objectives, outcome constraints don't + constrain objective metrics, and that constraint pairs on the + same metric are valid. + + Subclasses (e.g. ``MultiObjectiveOptimizationConfig``) override + this to allow multi-objective expressions. Args: - objective: Metric+direction to use for the optimization. + objectives: List of objectives to validate. outcome_constraints: Constraints to validate. """ - if objective.is_multi_objective: - # Raise error on multi-objective; `ScalarizedObjective` is OK - raise ValueError( - "OptimizationConfig does not support MultiObjective. " - "Use MultiObjectiveOptimizationConfig instead." - ) + all_metric_names: list[str] = [] + for obj in objectives: + if obj.is_multi_objective: + raise ValueError( + "Each objective in `objectives` must be a single or " + "scalarized objective, not a multi-objective. " + "Pass each sub-objective as a separate list element." + ) + for name in obj.metric_names: + if name in all_metric_names: + raise UserInputError( + f"Metric '{name}' appears in multiple objectives. " + "Each metric can only appear in one objective." + ) + all_metric_names.append(name) outcome_constraints = outcome_constraints or [] - unconstrainable_metric_names = objective.get_unconstrainable_metric_names() + unconstrainable_metric_names: list[str] = [] + for obj in objectives: + unconstrainable_metric_names.extend(obj.get_unconstrainable_metric_names()) OptimizationConfig._validate_outcome_constraints( unconstrainable_metric_names=unconstrainable_metric_names, outcome_constraints=outcome_constraints, @@ -274,7 +363,7 @@ def constraint_key(oc: OutcomeConstraint) -> str: def __repr__(self) -> str: return ( f"{self.__class__.__name__}(" - "objective=" + repr(self.objective) + ", " + "objectives=" + repr(self._objectives) + ", " "outcome_constraints=" + repr(self.outcome_constraints) + ")" ) @@ -299,7 +388,9 @@ class MultiObjectiveOptimizationConfig(OptimizationConfig): def __init__( self, - objective: Objective, + *, + objective: Objective | None = None, + objectives: list[Objective] | None = None, outcome_constraints: list[OutcomeConstraint] | None = None, objective_thresholds: list[OutcomeConstraint] | None = None, pruning_target_parameterization: Arm | None = None, @@ -308,9 +399,12 @@ def __init__( Args: objective: Metric+direction to use for the optimization. Should be either a - MultiObjective or a ScalarizedObjective. + MultiObjective or a ScalarizedObjective. Mutually exclusive with + ``objectives``. + objectives: List containing the objective. Mutually exclusive + with ``objective``. outcome_constraints: Constraints on metrics. - objective_thesholds: Thresholds objectives must exceed. Used for + objective_thresholds: Thresholds objectives must exceed. Used for multi-objective optimization and for calculating frontiers and hypervolumes. pruning_target_parameterization: Arm containing the target values for @@ -325,24 +419,24 @@ def __init__( consideration, and if not, the parameter value will be replaced with the corresponding value in the target arm. 
""" - constraints: list[OutcomeConstraint] = ( - [] if outcome_constraints is None else outcome_constraints + super().__init__( + objective=objective, + objectives=objectives, + outcome_constraints=outcome_constraints, + pruning_target_parameterization=pruning_target_parameterization, ) - objective_thresholds = objective_thresholds or [] + # Validate and set objective thresholds (MOOC-specific). + self._objective_thresholds: list[OutcomeConstraint] = objective_thresholds or [] self._validate_transformed_optimization_config( - objective=objective, - outcome_constraints=constraints, - objective_thresholds=objective_thresholds, + objectives=self._objectives, + outcome_constraints=self._outcome_constraints, + objective_thresholds=self._objective_thresholds, ) - self._objective: Objective = objective - self._outcome_constraints: list[OutcomeConstraint] = constraints - self._objective_thresholds: list[OutcomeConstraint] = objective_thresholds - self.pruning_target_parameterization = pruning_target_parameterization - # pyre-fixme[14]: Inconsistent override. def clone_with_args( self, - objective: Objective | None = None, + *, + objectives: list[Objective] | None = None, outcome_constraints: None | (list[OutcomeConstraint]) = _NO_OUTCOME_CONSTRAINTS, objective_thresholds: None | (list[OutcomeConstraint]) = _NO_OBJECTIVE_THRESHOLDS, @@ -350,7 +444,10 @@ def clone_with_args( | None = _NO_PRUNING_TARGET_PARAMETERIZATION, ) -> "MultiObjectiveOptimizationConfig": """Make a copy of this optimization config.""" - objective = self.objective.clone() if objective is None else objective + if objectives is not None: + cloned_objectives = objectives + else: + cloned_objectives = [obj.clone() for obj in self._objectives] outcome_constraints = ( [constraint.clone() for constraint in self.outcome_constraints] if outcome_constraints is _NO_OUTCOME_CONSTRAINTS @@ -367,7 +464,7 @@ def clone_with_args( else pruning_target_parameterization ) return MultiObjectiveOptimizationConfig( - objective=objective, + objectives=cloned_objectives, outcome_constraints=outcome_constraints, objective_thresholds=objective_thresholds, pruning_target_parameterization=pruning_target_parameterization, @@ -375,18 +472,18 @@ def clone_with_args( @property def objective(self) -> Objective: - """Get objective.""" - return self._objective + """Get the (multi-)objective.""" + return self._objectives[0] @objective.setter def objective(self, objective: Objective) -> None: """Set objective if not present in outcome constraints.""" self._validate_transformed_optimization_config( - objective=objective, + objectives=[objective], outcome_constraints=self.outcome_constraints, objective_thresholds=self.objective_thresholds, ) - self._objective = objective + self._objectives = [objective] @property def all_constraints(self) -> list[OutcomeConstraint]: @@ -404,7 +501,7 @@ def objective_thresholds( ) -> None: """Set outcome constraints if valid, else raise.""" self._validate_transformed_optimization_config( - objective=self.objective, + objectives=self._objectives, objective_thresholds=objective_thresholds, ) self._objective_thresholds = objective_thresholds @@ -418,7 +515,7 @@ def objective_thresholds_dict(self) -> dict[str, OutcomeConstraint]: @staticmethod def _validate_transformed_optimization_config( - objective: Objective, + objectives: list[Objective], outcome_constraints: list[OutcomeConstraint] | None = None, objective_thresholds: list[OutcomeConstraint] | None = None, ) -> None: @@ -431,10 +528,11 @@ def 
_validate_transformed_optimization_config( than the bound of the LEQ op. Args: - objective: Metric+direction to use for the optimization. + objectives: List of objectives to validate. outcome_constraints: Constraints to validate. objective_thresholds: Thresholds objectives must exceed. """ + objective = objectives[0] if not (objective.is_multi_objective or objective.is_scalarized_objective): raise TypeError( "`MultiObjectiveOptimizationConfig` requires an objective " @@ -471,7 +569,7 @@ def _validate_transformed_optimization_config( def __repr__(self) -> str: return ( f"{self.__class__.__name__}(" - "objective=" + repr(self.objective) + ", " + "objectives=" + repr(self._objectives) + ", " "outcome_constraints=" + repr(self.outcome_constraints) + ", " "objective_thresholds=" + repr(self.objective_thresholds) + ")" ) @@ -514,7 +612,9 @@ def check_objective_thresholds_match_objectives( class PreferenceOptimizationConfig(MultiObjectiveOptimizationConfig): def __init__( self, - objective: Objective, + *, + objective: Objective | None = None, + objectives: list[Objective] | None = None, preference_profile_name: str, outcome_constraints: list[OutcomeConstraint] | None = None, expect_relativized_outcomes: bool = False, @@ -524,7 +624,9 @@ def __init__( Args: objective: Metric+direction to use for the optimization. Should be a - MultiObjective. + MultiObjective. Mutually exclusive with ``objectives``. + objectives: List containing the objective. Mutually exclusive + with ``objective``. preference_profile_name: The name of the auxiliary experiment to use as the preference profile for the experiment. An auxiliary experiment with this name and purpose PE_EXPERIMENT should be attached to @@ -548,12 +650,6 @@ def __init__( consideration, and if not, the parameter value will be replaced with the corresponding value in the target arm. """ - if not objective.is_multi_objective: - raise TypeError( - "`PreferenceOptimizationConfig` requires a multi-objective. " - "Use `OptimizationConfig` instead if using a " - "single-metric objective." - ) if outcome_constraints: raise NotImplementedError( "Outcome constraints are not yet supported in " @@ -562,11 +658,19 @@ def __init__( # Call parent's __init__ with objective_thresholds=None super().__init__( - objective=objective, + objectives=[objective] if objective is not None else objectives, outcome_constraints=outcome_constraints, objective_thresholds=None, pruning_target_parameterization=pruning_target_parameterization, ) + # Validate that the objective is multi-objective (after super sets + # self._objectives). + if not self._objectives[0].is_multi_objective: + raise TypeError( + "`PreferenceOptimizationConfig` requires a multi-objective. " + "Use `OptimizationConfig` instead if using a " + "single-metric objective." + ) self.preference_profile_name = preference_profile_name self.expect_relativized_outcomes = expect_relativized_outcomes @@ -583,7 +687,8 @@ def is_bope_problem(self) -> bool: # pyre-ignore[14]: Inconsistent override. 
def clone_with_args( self, - objective: Objective | None = None, + *, + objectives: list[Objective] | None = None, preference_profile_name: str | None = None, outcome_constraints: list[OutcomeConstraint] | None = _NO_OUTCOME_CONSTRAINTS, expect_relativized_outcomes: bool | None = None, @@ -591,7 +696,11 @@ def clone_with_args( | None = _NO_PRUNING_TARGET_PARAMETERIZATION, ) -> PreferenceOptimizationConfig: """Make a copy of this optimization config.""" - objective = self.objective.clone() if objective is None else objective + cloned_objectives = ( + [obj.clone() for obj in self._objectives] + if objectives is None + else objectives + ) preference_profile_name = ( self.preference_profile_name @@ -615,7 +724,7 @@ def clone_with_args( ) return PreferenceOptimizationConfig( - objective=objective, + objectives=cloned_objectives, preference_profile_name=preference_profile_name, outcome_constraints=outcome_constraints, expect_relativized_outcomes=expect_relativized_outcomes, diff --git a/ax/core/tests/test_derived_metric.py b/ax/core/tests/test_derived_metric.py index 3e05c279d33..2443207a69d 100644 --- a/ax/core/tests/test_derived_metric.py +++ b/ax/core/tests/test_derived_metric.py @@ -460,7 +460,7 @@ def test_two_phase_experiment_fetch(self) -> None: name="test", search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - objective=Objective(metric=Metric(name="obj"), minimize=True), + objectives=[Objective(metric=Metric(name="obj"), minimize=True)], outcome_constraints=[ OutcomeConstraint( metric=derived, @@ -941,7 +941,7 @@ def test_experiment_integration(self) -> None: name="test", search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - objective=Objective(metric=Metric(name="obj"), minimize=True), + objectives=[Objective(metric=Metric(name="obj"), minimize=True)], outcome_constraints=[ OutcomeConstraint( metric=derived_ratio, diff --git a/ax/core/tests/test_experiment.py b/ax/core/tests/test_experiment.py index 31432d5a3ef..64468a1b9e9 100644 --- a/ax/core/tests/test_experiment.py +++ b/ax/core/tests/test_experiment.py @@ -305,7 +305,9 @@ def test_metric_setters(self) -> None: # Add a new metric and set optimization config using it as constraint self.experiment.add_metric(Metric(name="m3")) opt_config = OptimizationConfig( - objective=Objective(expression="m1", metric_name_to_signature={"m1": "m1"}), + objectives=[ + Objective(expression="m1", metric_name_to_signature={"m1": "m1"}) + ], outcome_constraints=[ OutcomeConstraint( expression="m3 >= -0.25 * baseline", @@ -558,7 +560,9 @@ def test_optimization_config_setter(self) -> None: # Setting an opt config with an unregistered metric should raise new_opt_config = OptimizationConfig( - objective=Objective(expression="m1", metric_name_to_signature={"m1": "m1"}), + objectives=[ + Objective(expression="m1", metric_name_to_signature={"m1": "m1"}) + ], outcome_constraints=[ OutcomeConstraint( expression="unknown_metric >= 0.5", @@ -930,7 +934,7 @@ def test_attach(self) -> None: name="test", search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - objective=Objective(metric=Metric(name="a", lower_is_better=True)) + objectives=[Objective(metric=Metric(name="a", lower_is_better=True))] ), tracking_metrics=[Metric(name="b"), Metric(name="c")], runner=SyntheticRunner(), @@ -1024,7 +1028,7 @@ def test_lookup_data(self) -> None: name="test", search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - objective=Objective(metric=Metric(name="a", 
lower_is_better=True)) + objectives=[Objective(metric=Metric(name="a", lower_is_better=True))] ), tracking_metrics=[Metric(name="b"), Metric(name="c")], runner=SyntheticRunner(), @@ -1576,17 +1580,21 @@ def test_metric_summary_df(self) -> None: name="test_experiment", search_space=SearchSpace(parameters=[]), optimization_config=MultiObjectiveOptimizationConfig( - objective=MultiObjective( - objectives=[ - Objective( - metric=Metric(name="my_objective_1", lower_is_better=True), - minimize=True, - ), - Objective( - metric=TestMetric(name="my_objective_2"), minimize=False - ), - ] - ), + objectives=[ + MultiObjective( + objectives=[ + Objective( + metric=Metric( + name="my_objective_1", lower_is_better=True + ), + minimize=True, + ), + Objective( + metric=TestMetric(name="my_objective_2"), minimize=False + ), + ] + ) + ], objective_thresholds=[ ObjectiveThreshold( metric=TestMetric(name="my_objective_2"), @@ -1676,13 +1684,15 @@ def test_metric_summary_df_scalarized_objective(self) -> None: name="test_experiment", search_space=SearchSpace(parameters=[]), optimization_config=OptimizationConfig( - objective=Objective( - expression="2*metric_a + -3*metric_b", - metric_name_to_signature={ - "metric_a": "metric_a", - "metric_b": "metric_b", - }, - ), + objectives=[ + Objective( + expression="2*metric_a + -3*metric_b", + metric_name_to_signature={ + "metric_a": "metric_a", + "metric_b": "metric_b", + }, + ) + ], ), tracking_metrics=[ Metric(name="metric_a", lower_is_better=False), diff --git a/ax/core/tests/test_multi_type_experiment.py b/ax/core/tests/test_multi_type_experiment.py index b314cfd5b75..1b351067cea 100644 --- a/ax/core/tests/test_multi_type_experiment.py +++ b/ax/core/tests/test_multi_type_experiment.py @@ -154,7 +154,7 @@ def test_setting_opt_config(self) -> None: m3 = BraninMetric("m3", ["x1", "x2"]) self.experiment.add_tracking_metric(m3) self.experiment.optimization_config = OptimizationConfig( - Objective(metric=m3, minimize=True) + objectives=[Objective(metric=m3, minimize=True)] ) self.assertDictEqual( self.experiment._metric_to_trial_type, diff --git a/ax/core/tests/test_optimization_config.py b/ax/core/tests/test_optimization_config.py index 29f253998d0..249baca0bb3 100644 --- a/ax/core/tests/test_optimization_config.py +++ b/ax/core/tests/test_optimization_config.py @@ -21,14 +21,14 @@ ScalarizedOutcomeConstraint, ) from ax.core.types import ComparisonOp -from ax.exceptions.core import UserInputError +from ax.exceptions.core import UnsupportedError, UserInputError from ax.utils.common.testutils import TestCase from pyre_extensions import assert_is_instance OC_STR = ( "OptimizationConfig(" - 'objective=Objective(expression="m1"), ' + 'objectives=[Objective(expression="m1")], ' "outcome_constraints=[OutcomeConstraint(m3 >= -0.25), " "OutcomeConstraint(m4 <= 0.25), " "ScalarizedOutcomeConstraint(0.5*m3 + 0.5*m4 >= 0.9975 * baseline)])" @@ -36,7 +36,7 @@ MOOC_STR = ( "MultiObjectiveOptimizationConfig(" - 'objective=Objective(expression="-m1, m2"), ' + 'objectives=[Objective(expression="-m1, m2")], ' "outcome_constraints=[OutcomeConstraint(m3 >= -0.25), " "OutcomeConstraint(m3 <= 0.25)], objective_thresholds=[])" ) @@ -81,33 +81,27 @@ def setUp(self) -> None: def test_Init(self) -> None: config1 = OptimizationConfig( - objective=self.objective, outcome_constraints=self.outcome_constraints + objectives=[self.objective], outcome_constraints=self.outcome_constraints ) self.assertEqual(str(config1), OC_STR) - with self.assertRaises(ValueError): - config1.objective = 
self.alt_objective # constrained Objective. # updating constraints is fine. config1.outcome_constraints = [self.outcome_constraint] self.assertEqual(len(config1.metric_names), 2) # objective without outcome_constraints is also supported - config2 = OptimizationConfig(objective=self.objective) + config2 = OptimizationConfig(objectives=[self.objective]) self.assertEqual(config2.outcome_constraints, []) - # setting objective is fine too, if it's compatible with constraints.. - config2.objective = self.m2_objective - # setting constraints on objectives is fine for MultiObjective components. - config2.outcome_constraints = self.outcome_constraints self.assertEqual(config2.outcome_constraints, self.outcome_constraints) def test_Eq(self) -> None: config1 = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=self.outcome_constraints, ) config2 = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=self.outcome_constraints, ) self.assertEqual(config1, config2) @@ -118,15 +112,15 @@ def test_Eq(self) -> None: metric=self.metrics["m2"], op=ComparisonOp.LEQ, bound=0.5 ) config3 = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=[self.outcome_constraint, new_outcome_constraint], ) self.assertNotEqual(config1, config3) def test_ConstraintValidation(self) -> None: - # Can build OptimizationConfig with MultiObjective + # Can't build OptimizationConfig with MultiObjective with self.assertRaises(ValueError): - OptimizationConfig(objective=self.multi_objective) + OptimizationConfig(objectives=[self.multi_objective]) # Can't constrain on objective metric. with warnings.catch_warnings(): @@ -136,14 +130,14 @@ def test_ConstraintValidation(self) -> None: ) with self.assertRaises(ValueError): OptimizationConfig( - objective=self.objective, outcome_constraints=[objective_constraint] + objectives=[self.objective], outcome_constraints=[objective_constraint] ) # Using an outcome constraint for ScalarizedObjective should also raise with self.assertRaisesRegex( ValueError, "Cannot constrain on objective metric." 
): OptimizationConfig( - objective=self.m2_objective, + objectives=[self.m2_objective], outcome_constraints=[objective_constraint], ) # Two outcome_constraints on the same metric with the same op @@ -157,7 +151,7 @@ def test_ConstraintValidation(self) -> None: ) with self.assertRaises(ValueError): OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=[self.outcome_constraint, duplicate_constraint], ) @@ -172,7 +166,7 @@ def test_ConstraintValidation(self) -> None: ) with self.assertRaises(ValueError): OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=self.outcome_constraints + [opposing_constraint], ) @@ -189,7 +183,7 @@ def test_ConstraintValidation(self) -> None: ) with self.assertRaises(ValueError): OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=([self.outcome_constraint, opposing_constraint]), ) @@ -204,7 +198,7 @@ def test_ConstraintValidation(self) -> None: bound=self.outcome_constraint.bound + 1, ) config = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=([self.outcome_constraint, opposing_constraint]), ) self.assertEqual( @@ -214,7 +208,7 @@ def test_ConstraintValidation(self) -> None: # Test with ScalarizedOutcomeConstraint # should work when not constraining obj config_with_scalarized = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=[self.scalarized_outcome_constraint], ) self.assertEqual(len(config_with_scalarized.outcome_constraints), 1) @@ -236,24 +230,24 @@ def test_ConstraintValidation(self) -> None: ValueError, "Cannot constrain on objective metric." ): OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=[scalarized_with_objective_metric], ) def test_Clone(self) -> None: config1 = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=self.outcome_constraints, ) self.assertEqual(config1, config1.clone()) def test_CloneWithArgs(self) -> None: config1 = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=self.outcome_constraints, ) config2 = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], ) # Empty args produce exact clone @@ -271,6 +265,107 @@ def test_CloneWithArgs(self) -> None: ) +class OptimizationConfigObjectivesListTest(TestCase): + """Tests for the new `OptimizationConfig(objectives=[...])` construction path.""" + + def setUp(self) -> None: + super().setUp() + self.metrics = { + "m1": Metric(name="m1"), + "m2": Metric(name="m2"), + "m3": Metric(name="m3"), + } + self.sig = {m: m for m in self.metrics} + self.obj1 = Objective(expression="m1", metric_name_to_signature=self.sig) + self.obj2 = Objective(expression="-m2", metric_name_to_signature=self.sig) + self.scalarized_obj = Objective( + expression="2*m1 + m2", metric_name_to_signature=self.sig + ) + + def test_objectives_kwarg_construction(self) -> None: + """Test single and multi-objective construction via objectives kwarg.""" + # Single objective + config = OptimizationConfig(objectives=[self.obj1]) + self.assertEqual(config.objectives, [self.obj1]) + self.assertEqual(config.objective, self.obj1) + self.assertFalse(config.is_moo_problem) + + # Multi-objective + config = OptimizationConfig(objectives=[self.obj1, self.obj2]) + self.assertEqual(config.objectives, [self.obj1, self.obj2]) + 
self.assertTrue(config.is_moo_problem) + with self.assertRaisesRegex(UnsupportedError, "multiple objectives"): + config.objective + + def test_objectives_kwarg_metric_aggregation(self) -> None: + """Test metric_names, metric_name_to_signature, metric_signatures.""" + constraint = OutcomeConstraint( + expression="m3 >= 0.5", metric_name_to_signature=self.sig + ) + config = OptimizationConfig( + objectives=[self.obj1, self.obj2], + outcome_constraints=[constraint], + ) + self.assertEqual(config.metric_names, {"m1", "m2", "m3"}) + self.assertEqual( + config.metric_name_to_signature, {"m1": "m1", "m2": "m2", "m3": "m3"} + ) + self.assertEqual(config.metric_signatures, {"m1", "m2", "m3"}) + + def test_objectives_kwarg_validation(self) -> None: + """Test validation errors for objectives kwarg.""" + with self.subTest("mutual_exclusivity"): + with self.assertRaisesRegex(UserInputError, "Cannot specify both"): + OptimizationConfig(objective=self.obj1, objectives=[self.obj1]) + + with self.subTest("neither_specified"): + with self.assertRaisesRegex(UserInputError, "Must specify either"): + OptimizationConfig() + + with self.subTest("empty_list"): + with self.assertRaisesRegex(UserInputError, "must not be empty"): + OptimizationConfig(objectives=[]) + + with self.subTest("multi_objective_expression"): + multi_obj = Objective( + expression="m1, -m2", metric_name_to_signature=self.sig + ) + with self.assertRaisesRegex(ValueError, "single or scalarized"): + OptimizationConfig(objectives=[multi_obj]) + + with self.subTest("duplicate_metric_names"): + obj_dup = Objective(expression="m1", metric_name_to_signature=self.sig) + with self.assertRaisesRegex(UserInputError, "appears in multiple"): + OptimizationConfig(objectives=[self.obj1, obj_dup]) + + def test_objectives_kwarg_clone_and_repr(self) -> None: + """Test clone, clone_with_args, and repr for objectives-list configs.""" + config = OptimizationConfig(objectives=[self.obj1, self.obj2]) + + # clone preserves objectives + cloned = config.clone() + self.assertEqual(len(cloned.objectives), 2) + self.assertEqual(cloned.objectives[0].expression, "m1") + self.assertEqual(cloned.objectives[1].expression, "-m2") + self.assertTrue(cloned.is_moo_problem) + + # clone_with_args(objectives=) replaces the list with a single objective + cloned = config.clone_with_args(objectives=[self.obj1]) + self.assertEqual(len(cloned.objectives), 1) + self.assertFalse(cloned.is_moo_problem) + + # clone_with_args(objectives=) replaces the list + obj3 = Objective(expression="m3", metric_name_to_signature=self.sig) + cloned = config.clone_with_args(objectives=[self.obj1, obj3]) + self.assertEqual(len(cloned.objectives), 2) + self.assertEqual(cloned.objectives[1].expression, "m3") + + # repr always uses "objectives=" + self.assertIn("objectives=", repr(config)) + single_config = OptimizationConfig(objectives=[self.obj1]) + self.assertIn("objectives=", repr(single_config)) + + class MultiObjectiveOptimizationConfigTest(TestCase): def setUp(self) -> None: super().setUp() @@ -335,7 +430,8 @@ def setUp(self) -> None: def test_Init(self) -> None: config1 = MultiObjectiveOptimizationConfig( - objective=self.multi_objective, outcome_constraints=self.outcome_constraints + objectives=[self.multi_objective], + outcome_constraints=self.outcome_constraints, ) self.assertEqual(str(config1), MOOC_STR) with self.assertRaisesRegex( @@ -350,7 +446,7 @@ def test_Init(self) -> None: self.assertEqual(len(config1.metric_names), 3) # objective without outcome_constraints is also supported - config2 = 
MultiObjectiveOptimizationConfig(objective=self.multi_objective) + config2 = MultiObjectiveOptimizationConfig(objectives=[self.multi_objective]) # setting objective is fine too, if it's compatible with constraints. config2.objective = self.multi_objective @@ -361,14 +457,14 @@ def test_Init(self) -> None: # construct constraints with objective_thresholds: config3 = MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], objective_thresholds=self.objective_thresholds, ) self.assertEqual(config3.all_constraints, self.objective_thresholds) # objective_thresholds and outcome constraints together. config4 = MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], objective_thresholds=self.objective_thresholds, outcome_constraints=[self.m3_constraint], ) @@ -380,7 +476,7 @@ def test_Init(self) -> None: # verify relative_objective_thresholds works: config5 = MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], objective_thresholds=self.relative_objective_thresholds, ) threshold = config5.objective_thresholds[0] @@ -390,7 +486,7 @@ def test_Init(self) -> None: # ValueError on wrong direction constraints with self.assertRaises(UserInputError): MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], # pyre-fixme[6]: For 2nd param expected # `Optional[List[ObjectiveThreshold]]` but got # `List[OutcomeConstraint]`. @@ -399,10 +495,12 @@ def test_Init(self) -> None: def test_Eq(self) -> None: config1 = MultiObjectiveOptimizationConfig( - objective=self.multi_objective, outcome_constraints=self.outcome_constraints + objectives=[self.multi_objective], + outcome_constraints=self.outcome_constraints, ) config2 = MultiObjectiveOptimizationConfig( - objective=self.multi_objective, outcome_constraints=self.outcome_constraints + objectives=[self.multi_objective], + outcome_constraints=self.outcome_constraints, ) self.assertEqual(config1, config2) @@ -412,7 +510,7 @@ def test_Eq(self) -> None: metric=self.metrics["m3"], op=ComparisonOp.LEQ, bound=0.5 ) config3 = MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], outcome_constraints=[self.outcome_constraint, new_outcome_constraint], ) self.assertNotEqual(config1, config3) @@ -425,7 +523,7 @@ def test_ConstraintValidation(self) -> None: "`MultiObjective` or `ScalarizedObjective`.", ): # pyre-fixme [6]: Incompatible parameter type - MultiObjectiveOptimizationConfig(objective=self.objective) + MultiObjectiveOptimizationConfig(objectives=[self.objective]) # Using an outcome constraint for an objective should raise with warnings.catch_warnings(): @@ -440,7 +538,7 @@ def test_ConstraintValidation(self) -> None: ValueError, "Cannot constrain on objective metric." 
): MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], outcome_constraints=[outcome_constraint_m1], ) # Two outcome_constraints on the same metric with the same op @@ -454,7 +552,7 @@ def test_ConstraintValidation(self) -> None: ) with self.assertRaises(ValueError): MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], outcome_constraints=[self.outcome_constraint, duplicate_constraint], ) @@ -469,7 +567,7 @@ def test_ConstraintValidation(self) -> None: ) with self.assertRaises(ValueError): MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], outcome_constraints=self.outcome_constraints + [opposing_constraint], ) @@ -486,7 +584,7 @@ def test_ConstraintValidation(self) -> None: ) with self.assertRaises(ValueError): MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], outcome_constraints=([self.outcome_constraint, opposing_constraint]), ) @@ -501,7 +599,7 @@ def test_ConstraintValidation(self) -> None: bound=self.outcome_constraint.bound + 1, ) config = MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], outcome_constraints=([self.outcome_constraint, opposing_constraint]), ) self.assertEqual( @@ -519,7 +617,7 @@ def test_ConstraintValidation(self) -> None: bound=0.0, ) config_with_scalarized = MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], outcome_constraints=[scalarized_constraint], ) self.assertEqual(len(config_with_scalarized.outcome_constraints), 1) @@ -541,13 +639,14 @@ def test_ConstraintValidation(self) -> None: ValueError, "Cannot constrain on objective metric." 
): MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], outcome_constraints=[scalarized_with_objective_metric], ) def test_Clone(self) -> None: config1 = MultiObjectiveOptimizationConfig( - objective=self.multi_objective, outcome_constraints=self.outcome_constraints + objectives=[self.multi_objective], + outcome_constraints=self.outcome_constraints, ) cloned1 = config1.clone() # Clone normalizes MultiObjective to plain Objective; compare by @@ -558,7 +657,7 @@ def test_Clone(self) -> None: self.assertEqual(config1.objective_thresholds, cloned1_moo.objective_thresholds) config2 = MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], objective_thresholds=self.objective_thresholds, ) cloned2 = config2.clone() @@ -569,12 +668,12 @@ def test_Clone(self) -> None: def test_CloneWithArgs(self) -> None: config1 = MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], objective_thresholds=self.objective_thresholds, outcome_constraints=self.outcome_constraints, ) config2 = MultiObjectiveOptimizationConfig( - objective=self.multi_objective, + objectives=[self.multi_objective], ) # Empty args produce clone with same expression and constraints @@ -710,7 +809,7 @@ def test_Clone(self) -> None: objectives=[self.objectives["o1"], self.objectives["o3"]] ) cloned_with_diff_objective = config.clone_with_args( - objective=different_objective + objectives=[different_objective] ) self.assertEqual( cloned_with_diff_objective.objective.expression, diff --git a/ax/core/tests/test_utils.py b/ax/core/tests/test_utils.py index af1aba3d414..75cb234ff04 100644 --- a/ax/core/tests/test_utils.py +++ b/ax/core/tests/test_utils.py @@ -888,10 +888,12 @@ def test_get_target_trial_index_excludes_lilo_trials(self) -> None: ) exp.add_tracking_metric(pairwise_metric) exp.optimization_config = OptimizationConfig( - objective=Objective( - expression=pairwise_name, - metric_name_to_signature={pairwise_name: pairwise_name}, - ), + objectives=[ + Objective( + expression=pairwise_name, + metric_name_to_signature={pairwise_name: pairwise_name}, + ) + ], ) exp.llm_messages = [LLMMessage(role="system", content="test")] @@ -947,10 +949,12 @@ def test_is_lilo_experiment(self) -> None: ) exp.add_tracking_metric(pairwise_metric) exp.optimization_config = OptimizationConfig( - objective=Objective( - expression=pairwise_name, - metric_name_to_signature={pairwise_name: pairwise_name}, - ), + objectives=[ + Objective( + expression=pairwise_name, + metric_name_to_signature={pairwise_name: pairwise_name}, + ) + ], ) self.assertFalse(is_lilo_experiment(exp)) @@ -1049,7 +1053,7 @@ def test_custom_optimization_config(self) -> None: # Custom config requiring only "branin": COMPLETE. custom_config = OptimizationConfig( - objective=Objective(metric=Metric(name="branin"), minimize=False), + objectives=[Objective(metric=Metric(name="branin"), minimize=False)], ) result = compute_metric_availability( experiment=exp, optimization_config=custom_config @@ -1058,7 +1062,7 @@ def test_custom_optimization_config(self) -> None: # Custom config requiring an unrelated metric: INCOMPLETE. 
other_config = OptimizationConfig( - objective=Objective(metric=Metric(name="branin"), minimize=False), + objectives=[Objective(metric=Metric(name="branin"), minimize=False)], outcome_constraints=[ OutcomeConstraint( metric=Metric(name="other_metric"), @@ -1118,7 +1122,7 @@ def test_curve_data(self) -> None: exp.add_metric(Metric(name="metric_a")) exp.add_metric(Metric(name="metric_b")) exp.optimization_config = OptimizationConfig( - objective=Objective(metric=Metric(name="metric_a"), minimize=False), + objectives=[Objective(metric=Metric(name="metric_a"), minimize=False)], outcome_constraints=[ OutcomeConstraint( metric=Metric(name="metric_b"), diff --git a/ax/docs/plans/2026-04-02-restrict-objective-simplify-optconfig-design.md b/ax/docs/plans/2026-04-02-restrict-objective-simplify-optconfig-design.md new file mode 100644 index 00000000000..e27d91b0032 --- /dev/null +++ b/ax/docs/plans/2026-04-02-restrict-objective-simplify-optconfig-design.md @@ -0,0 +1,280 @@ +# Design: Restrict Objective to Single/Scalarized & Simplify OptimizationConfig + +**Date:** 2026-04-02 +**Author:** Sait Cakmak +**Design Doc:** https://docs.google.com/document/d/1EGQYmBjiNGtYapXu1RLHEBdA5Yz2c7q17acX3es0yV8/edit +**Selected Option:** Option 3 -- Restrict Objective to single, possibly scalarized objective +**Prerequisite:** D98837790 (Move metric_name_to_signature from Adapter to Objective/OutcomeConstraint) + +## Summary + +This plan implements Option 3 from the design doc: restrict `Objective` to represent +a single (possibly scalarized) objective, move multi-objective representation to +`OptimizationConfig(objectives=list[Objective])`, add `threshold`/`relative_threshold` +fields to `Objective`, and deprecate `MultiObjectiveOptimizationConfig`, `MultiObjective`, +`ScalarizedObjective`, and `ObjectiveThreshold` (keeping them as deprecated shims, +removing all internal usage). + +## Diff Stack + +Each diff is backward-compatible. Summaries should reference this design doc. + +--- + +### Diff 1: Add `objectives: list[Objective]` to `OptimizationConfig` + +**Goal:** Enable the new `OptimizationConfig(objectives=[...])` construction path +without breaking any existing code. + +**Files:** +- `ax/core/optimization_config.py` +- `ax/core/tests/test_optimization_config.py` + +**Changes:** + +1. `OptimizationConfig.__init__` accepts new kwarg `objectives: list[Objective] | None = None` + - Mutually exclusive with `objective` -- raise `UserInputError` if both provided + - If `objectives` is provided, store as `self._objectives: list[Objective]` + - If `objective` is provided (existing path), wrap as `self._objectives = [objective]` + - Validate: no duplicate metric names across objectives in the list + - Validate: each objective in the list must not be multi-objective (no comma expressions) + +2. `objectives` property: returns `self._objectives` + +3. `objective` property: returns `self._objectives[0]` if `len == 1`, raises + `UserInputError("Access individual objectives via `objectives` property for + multi-objective configs.")` if `len > 1` + +4. `is_moo_problem` property: `len(self._objectives) > 1` + (replaces current `self.objective.is_multi_objective`) + +5. `metric_names` property: aggregate across all objectives + constraints + +6. `metric_name_to_signature` property: aggregate across all objectives + constraints + +7. `metric_signatures` property: aggregate across all objectives + constraints + +8. `objective_thresholds` property: filter constraints matching any objective metric name + +9. 
`_validate_transformed_optimization_config`: drop the "does not support + MultiObjective" error. Add validation that objectives don't share metrics + and that objective metrics aren't constrained. + +10. `clone_with_args`: support cloning with `objectives` list + +11. Tests: add construction tests for `OptimizationConfig(objectives=[obj1, obj2])`, + test `is_moo_problem`, test `objective` raises for MOO, test metric aggregation + +--- + +### Diff 2: Update `MultiObjectiveOptimizationConfig` and `PreferenceOptimizationConfig` + +**Goal:** Make MOOC use the new `objectives` list internally. Deprecate MOOC. + +**Files:** +- `ax/core/optimization_config.py` +- `ax/core/tests/test_optimization_config.py` + +**Changes:** + +1. `MOOC.__init__`: + - Accept `objectives: list[Objective] | None = None` kwarg (new path) + - If legacy `objective` kwarg is used with a comma-separated multi-objective + expression, decompose into individual objectives + - If `objective_thresholds` list is provided, resolve each threshold onto + the matching Objective (store on Objective for now as a temporary attribute, + to be replaced by `threshold` field in Diff 5) + - Emit `DeprecationWarning` for the class itself + - Call `super().__init__(objectives=objectives_list, ...)` + +2. `MOOC.objective_thresholds` property: return from stored `_objective_thresholds` + OR synthesize from objectives (backward compat) + +3. `PreferenceOptimizationConfig`: + - Accept `objectives: list[Objective] | None` kwarg + - Validate `len(objectives) > 1` instead of `isinstance(objective, MultiObjective)` + - Deprecation warning for passing `objective` with multi-objective expression + +4. Tests: update MOOC and PreferenceOptimizationConfig tests + +--- + +### Diff 3: Restrict `Objective` to single/scalarized expressions + +**Goal:** Make `Objective` reject comma-separated (multi-objective) expressions. +Safe because Diffs 1-2 provide the `objectives=[...]` alternative. + +**Files:** +- `ax/core/objective.py` +- `ax/core/tests/test_objective.py` +- Any internal callers that construct comma-separated Objectives (update to use + `OptimizationConfig(objectives=[...])`) + +**Changes:** + +1. `Objective.__init__`: after parsing the expression, if it contains commas + (i.e., `parse_objective_expression` returns a tuple), raise `UserInputError` + with migration guidance to use `OptimizationConfig(objectives=[...])`. + +2. Remove `is_multi_objective` property (always False now, no longer meaningful). + Add a deprecated shim that warns and returns False. + +3. Remove `is_single_objective` property (redundant -- it's always `not is_scalarized`). + Add a deprecated shim. + +4. `MultiObjective.__init__`: raise `NotImplementedError` with message: + "MultiObjective is removed. Use OptimizationConfig(objectives=[...]) instead." + +5. Update all internal callers that construct `Objective(expression="acc, -loss")` + to use `OptimizationConfig(objectives=[Objective("acc"), Objective("-loss")])`. + Key files: + - `ax/core/optimization_config.py` (MOOC validation) + - `ax/core/experiment.py` + - `ax/adapter/adapter_utils.py` + - `ax/storage/json_store/decoder.py` + - `ax/storage/sqa_store/encoder.py` / `decoder.py` + +6. Tests: verify comma expressions raise, update multi-objective test construction + +--- + +### Diff 4: Migrate `isinstance(_, MOOC)` checks to `is_moo_problem` + +**Goal:** Mechanical replacement of isinstance checks. Large but safe. 
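+
+The replacement is purely mechanical; as a sketch (illustrative only -- `opt_config`
+stands in for whatever local name each call site uses):
+
+```python
+# Before: callers are coupled to the MultiObjectiveOptimizationConfig type.
+if isinstance(opt_config, MultiObjectiveOptimizationConfig):
+    ...  # multi-objective handling
+
+# After: rely on the `is_moo_problem` property added to the base class in Diff 1.
+if opt_config.is_moo_problem:
+    ...  # multi-objective handling
+```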
+ +**Files:** ~24 files across adapter/, service/, benchmark/, analysis/, +generators/, storage/, early_stopping/, global_stopping/, fb/ + +**Changes:** + +Replace all `isinstance(opt_config, MultiObjectiveOptimizationConfig)` with +`opt_config.is_moo_problem`. Approximately 30 occurrences. + +Key files (non-exhaustive): +- `ax/adapter/torch.py` (2 occurrences) +- `ax/adapter/adapter_utils.py` (1) +- `ax/adapter/transforms/objective_as_constraint.py` (2) +- `ax/adapter/transforms/standardize_y.py` (1) +- `ax/adapter/transforms/relativize.py` (1) +- `ax/adapter/transforms/derelativize.py` (1) +- `ax/adapter/transforms/power_transform_y.py` (1) +- `ax/adapter/transforms/stratified_standardize_y.py` (1) +- `ax/adapter/transforms/log_y.py` (1) +- `ax/service/utils/best_point.py` (2) +- `ax/benchmark/benchmark_problem.py` (3) +- `ax/benchmark/benchmark.py` (1) +- `ax/core/experiment.py` (1) +- `ax/storage/json_store/encoders.py` (1) +- `ax/storage/sqa_store/encoder.py` (1) +- `ax/global_stopping/strategies/improvement.py` (1) +- `ax/analysis/plotly/objective_p_feasible_frontier.py` (1) +- `ax/analysis/healthcheck/early_stopping_healthcheck.py` (1) +- `ax/early_stopping/dispatch.py` (1) +- `ax/fb/early_stopping/strategies/multi_objective.py` (1) + +Also migrate remaining `isinstance(_, MultiObjective)` checks (~4 in production) +to `objective.is_multi_objective` (which is now deprecated) or the new +`opt_config.is_moo_problem`. + +**Consider splitting** into sub-diffs by module if > 500 lines. + +--- + +### Diff 5: Add `threshold` and `relative_threshold` to `Objective` + +**Goal:** Co-locate objective thresholds with their objectives. + +**Files:** +- `ax/core/objective.py` +- `ax/core/optimization_config.py` +- `ax/core/tests/test_objective.py` +- `ax/core/tests/test_optimization_config.py` +- `ax/storage/json_store/encoders.py` +- `ax/storage/json_store/decoder.py` +- `ax/storage/sqa_store/encoder.py` +- `ax/storage/sqa_store/decoder.py` +- `ax/storage/sqa_store/sqa_classes.py` (if SQA columns needed) + +**Changes:** + +1. `Objective.__init__` accepts `threshold: float | None = None` and + `relative_threshold: float | None = None` + - Store as `self._threshold` and `self._relative_threshold` + - Properties with getters/setters + +2. `OptimizationConfig.objective_thresholds` property: synthesize + `OutcomeConstraint` objects from each objective's threshold/relative_threshold + (for downstream compat with adapter layer's `extract_objective_thresholds`) + +3. `MOOC.__init__`: when `objective_thresholds` list is provided, resolve each + `OutcomeConstraint` to the matching `Objective.threshold` (or + `relative_threshold`). Validate no conflicts. + +4. `Objective.clone()`: preserve threshold fields + +5. Storage: + - JSON: add `"threshold"` and `"relative_threshold"` to `objective_to_dict`. + Decoder: read these fields, defaulting to `None` for old data. + - SQA: add nullable columns or store in `properties` dict. + +6. Tests: construction, serialization round-trip, MOOC threshold resolution + +--- + +### Diff 6: Cleanup -- Remove internal usage of deprecated classes + +**Goal:** All internal Ax code uses the new patterns. Deprecated classes remain +as shims for external consumers. + +**Files:** Broad -- all files that import/use `MultiObjectiveOptimizationConfig`, +`MultiObjective`, `ScalarizedObjective`, `ObjectiveThreshold`. + +**Changes:** + +1. Replace all internal construction of `MOOC(...)` with + `OptimizationConfig(objectives=[...])` + +2. 
Replace all internal construction of `MultiObjective([...])` with + individual `Objective` instances in a list + +3. Replace all internal construction of `ObjectiveThreshold(...)` with + `Objective(..., threshold=...)` or `OutcomeConstraint(...)` + +4. Remove internal imports of deprecated classes (keep re-exports for external compat) + +5. Strengthen deprecation warnings (add removal timeline) + +6. Clean up dead code paths, unused helper functions + +**Consider splitting** into sub-diffs: core, adapter, service, storage, benchmark, +analysis, fb. + +--- + +## Key Design Decisions + +1. **`objectives` list on base `OptimizationConfig`** -- not a separate class. + Multi-objective is a property (`is_moo_problem`), not a type. + +2. **`objective` property raises for MOO** -- forces callers to use `objectives` + for multi-objective, preventing silent bugs from accessing only the first objective. + +3. **Thresholds on `Objective`** -- `threshold` (absolute) and `relative_threshold` + (percent change from status quo). When both are set, the more stringent one + is used after un-relativization. + +4. **Deprecated classes kept as shims** -- `MultiObjective`, `ScalarizedObjective`, + `ObjectiveThreshold`, `MultiObjectiveOptimizationConfig` remain importable but + emit deprecation warnings. Internal usage is removed. + +5. **Backward-compatible storage** -- old serialized data (without `objectives` list + or `threshold` fields) deserializes correctly via fallback paths. + +## Risks and Mitigations + +- **Large surface area:** ~70 files reference these classes. Mitigated by splitting + into 6+ focused diffs and running full test suites. +- **Storage backward compat:** Old experiments must still load. Mitigated by + keeping decoder fallback paths and testing with existing fixtures. +- **External consumers:** Meta-internal code outside ax/ may use deprecated classes. + Mitigated by keeping shims and using deprecation warnings before removal. 
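+
+## Appendix: Example of the New Construction Path
+
+A minimal, illustrative sketch of the target construction path after Diffs 1-2
+(not a final interface; import paths and metric-based `Objective` construction
+are assumed to match current Ax usage):
+
+```python
+from ax.core.metric import Metric
+from ax.core.objective import Objective
+from ax.core.optimization_config import OptimizationConfig
+
+# Single objective: `objective` still resolves to the lone list entry.
+soo = OptimizationConfig(
+    objectives=[Objective(metric=Metric(name="m1"), minimize=True)]
+)
+assert not soo.is_moo_problem
+
+# Multi-objective: one Objective per metric, each with its own direction.
+# Accessing `moo.objective` raises for multi-objective configs; use `objectives`.
+moo = OptimizationConfig(
+    objectives=[
+        Objective(metric=Metric(name="m1"), minimize=True),
+        Objective(metric=Metric(name="m2"), minimize=False),
+    ]
+)
+assert moo.is_moo_problem
+```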
diff --git a/ax/early_stopping/experiment_replay.py b/ax/early_stopping/experiment_replay.py index a6b81b7226e..e9cc96dcc7c 100644 --- a/ax/early_stopping/experiment_replay.py +++ b/ax/early_stopping/experiment_replay.py @@ -69,7 +69,7 @@ def replay_experiment( lower_is_better=metric.lower_is_better, ) optimization_config = OptimizationConfig( - objective=Objective(metric=replay_metric), + objectives=[Objective(metric=replay_metric)], ) runner = MapDataReplayRunner(replay_metric=replay_metric) diff --git a/ax/early_stopping/tests/test_strategies.py b/ax/early_stopping/tests/test_strategies.py index 0af83a47b05..a3f437e443f 100644 --- a/ax/early_stopping/tests/test_strategies.py +++ b/ax/early_stopping/tests/test_strategies.py @@ -300,13 +300,15 @@ def test_all_objectives_and_directions_scalarized(self) -> None: test_experiment.add_tracking_metric(metric_b) # Scalarized objective: maximize metric_a, minimize metric_b test_experiment._optimization_config = OptimizationConfig( - objective=Objective( - expression="2*metric_a + -3*metric_b", - metric_name_to_signature={ - "metric_a": "metric_a", - "metric_b": "metric_b", - }, - ), + objectives=[ + Objective( + expression="2*metric_a + -3*metric_b", + metric_name_to_signature={ + "metric_a": "metric_a", + "metric_b": "metric_b", + }, + ) + ], ) es_strategy = FakeStrategy() directions = es_strategy._all_objectives_and_directions( diff --git a/ax/generation_strategy/tests/test_dispatch_utils.py b/ax/generation_strategy/tests/test_dispatch_utils.py index 8620dd82bd3..5f6d0643dc3 100644 --- a/ax/generation_strategy/tests/test_dispatch_utils.py +++ b/ax/generation_strategy/tests/test_dispatch_utils.py @@ -167,13 +167,15 @@ def test_choose_generation_strategy_legacy(self) -> None: ) with self.subTest("MOO"): optimization_config = MultiObjectiveOptimizationConfig( - objective=Objective( - expression="branin_a, branin_b", - metric_name_to_signature={ - "branin_a": "branin_a", - "branin_b": "branin_b", - }, - ) + objectives=[ + Objective( + expression="branin_a, branin_b", + metric_name_to_signature={ + "branin_a": "branin_a", + "branin_b": "branin_b", + }, + ) + ] ) sobol_gpei = choose_generation_strategy_legacy( search_space=get_branin_search_space(), @@ -344,13 +346,15 @@ def test_choose_generation_strategy_legacy(self) -> None: search_space = get_branin_search_space(with_choice_parameter=True) search_space.parameters["x2"]._is_ordered = False optimization_config = MultiObjectiveOptimizationConfig( - objective=Objective( - expression="branin_a, branin_b", - metric_name_to_signature={ - "branin_a": "branin_a", - "branin_b": "branin_b", - }, - ) + objectives=[ + Objective( + expression="branin_a, branin_b", + metric_name_to_signature={ + "branin_a": "branin_a", + "branin_b": "branin_b", + }, + ) + ] ) moo_mixed = choose_generation_strategy_legacy( search_space=search_space, @@ -422,13 +426,15 @@ def test_choose_generation_strategy_legacy(self) -> None: num_initialization_trials=3, use_saasbo=True, optimization_config=MultiObjectiveOptimizationConfig( - objective=Objective( - expression="branin_a, branin_b", - metric_name_to_signature={ - "branin_a": "branin_a", - "branin_b": "branin_b", - }, - ) + objectives=[ + Objective( + expression="branin_a, branin_b", + metric_name_to_signature={ + "branin_a": "branin_a", + "branin_b": "branin_b", + }, + ) + ] ), ) self.assertEqual( diff --git a/ax/generation_strategy/tests/test_generation_strategy.py b/ax/generation_strategy/tests/test_generation_strategy.py index 190d73b54bc..00062f61005 100644 --- 
a/ax/generation_strategy/tests/test_generation_strategy.py +++ b/ax/generation_strategy/tests/test_generation_strategy.py @@ -193,10 +193,12 @@ def test_gen_with_parameter_constraints(self) -> None: name="test_choice_constraint", search_space=search_space, optimization_config=OptimizationConfig( - objective=Objective( - metric=BraninMetric(name="branin", param_names=["x1", "x2"]), - minimize=True, - ), + objectives=[ + Objective( + metric=BraninMetric(name="branin", param_names=["x1", "x2"]), + minimize=True, + ) + ], ), ) diff --git a/ax/global_stopping/tests/test_strategies.py b/ax/global_stopping/tests/test_strategies.py index e9a5daab958..b70430c3f76 100644 --- a/ax/global_stopping/tests/test_strategies.py +++ b/ax/global_stopping/tests/test_strategies.py @@ -176,7 +176,7 @@ def _create_single_objective_experiment( ), ] optimization_config = OptimizationConfig( - objective=objective, outcome_constraints=outcome_constraints + objectives=[objective], outcome_constraints=outcome_constraints ) exp = Experiment( name="test_experiment", @@ -244,7 +244,7 @@ def _create_multi_objective_experiment( ), ] optimization_config = MultiObjectiveOptimizationConfig( - objective=MultiObjective(objectives), + objectives=[MultiObjective(objectives)], outcome_constraints=outcome_constraints, # pyre-ignore[6]: ObjectiveThreshold is a subclass of OutcomeConstraint; # list invariance prevents direct assignment. @@ -377,10 +377,12 @@ def test_scalarized_objective_raises(self) -> None: ] exp = self._create_single_objective_experiment(metric_values=metric_values) exp._optimization_config = OptimizationConfig( - objective=Objective( - expression="2*m1 + -3*m4", - metric_name_to_signature={"m1": "m1", "m4": "m4"}, - ), + objectives=[ + Objective( + expression="2*m1 + -3*m4", + metric_name_to_signature={"m1": "m1", "m4": "m4"}, + ) + ], outcome_constraints=none_throws( exp.optimization_config ).outcome_constraints, @@ -402,7 +404,7 @@ def test_scalarized_outcome_constraint_raises(self) -> None: ] exp = self._create_single_objective_experiment(metric_values=metric_values) exp._optimization_config = OptimizationConfig( - objective=Objective(metric=Metric(name="m1"), minimize=False), + objectives=[Objective(metric=Metric(name="m1"), minimize=False)], outcome_constraints=[ OutcomeConstraint( expression="1.0*m2 + 1.0*m3 <= 0.5", diff --git a/ax/metrics/tests/test_map_replay.py b/ax/metrics/tests/test_map_replay.py index c2957bd0ae5..430a0d03e6b 100644 --- a/ax/metrics/tests/test_map_replay.py +++ b/ax/metrics/tests/test_map_replay.py @@ -48,10 +48,12 @@ def test_map_replay(self) -> None: name="dummy_experiment", search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - objective=Objective( - metric=replay_metric, - minimize=True, - ) + objectives=[ + Objective( + metric=replay_metric, + minimize=True, + ) + ] ), tracking_metrics=[replay_metric], runner=SyntheticRunner(), @@ -110,10 +112,12 @@ def test_map_replay_non_uniform(self) -> None: name="dummy_experiment", search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - objective=Objective( - metric=replay_metric, - minimize=True, - ) + objectives=[ + Objective( + metric=replay_metric, + minimize=True, + ) + ] ), tracking_metrics=[replay_metric], runner=SyntheticRunner(), diff --git a/ax/orchestration/tests/test_orchestrator.py b/ax/orchestration/tests/test_orchestrator.py index 023f970fca6..2c1898a0e5a 100644 --- a/ax/orchestration/tests/test_orchestrator.py +++ b/ax/orchestration/tests/test_orchestrator.py @@ -205,7 
+205,7 @@ def setUp(self) -> None: self.branin_experiment_no_impl_runner_or_metrics = Experiment( search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - objective=Objective(metric=Metric(name="branin"), minimize=False) + objectives=[Objective(metric=Metric(name="branin"), minimize=False)] ), name="branin_experiment_no_impl_runner_or_metrics", ) @@ -2727,12 +2727,14 @@ def test_generate_candidates_does_not_generate_if_missing_data(self) -> None: ) self.branin_experiment.add_tracking_metric(custom_metric) self.branin_experiment.optimization_config = OptimizationConfig( - Objective( - metric=CustomTestMetric( - name="custom_test_metric", test_attribute="test" - ), - minimize=False, - ) + objectives=[ + Objective( + metric=CustomTestMetric( + name="custom_test_metric", test_attribute="test" + ), + minimize=False, + ) + ] ) gs = get_online_sobol_mbm_generation_strategy() self.branin_experiment.runner = InfinitePollRunner() @@ -2950,7 +2952,9 @@ def setUp(self) -> None: self.branin_experiment.name = "branin_test_experiment" self.branin_experiment.update_metric(BraninMetric("m1", ["x1", "x2"])) self.branin_experiment.optimization_config = OptimizationConfig( - objective=Objective(metric=BraninMetric("m1", ["x1", "x2"]), minimize=True) + objectives=[ + Objective(metric=BraninMetric("m1", ["x1", "x2"]), minimize=True) + ] ) self.runner = SyntheticRunnerWithStatusPolling() @@ -2962,9 +2966,9 @@ def setUp(self) -> None: ) self.branin_timestamp_map_metric_experiment.optimization_config = ( OptimizationConfig( - objective=Objective( - metric=get_map_metric(name="branin_map"), minimize=True - ) + objectives=[ + Objective(metric=get_map_metric(name="branin_map"), minimize=True) + ] ) ) self.branin_timestamp_map_metric_experiment.update_runner( @@ -2974,7 +2978,7 @@ def setUp(self) -> None: self.branin_experiment_no_impl_runner_or_metrics = MultiTypeExperiment( search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - Objective(metric=Metric(name="branin"), minimize=True) + objectives=[Objective(metric=Metric(name="branin"), minimize=True)] ), default_trial_type="type1", default_runner=None, diff --git a/ax/plot/pareto_utils.py b/ax/plot/pareto_utils.py index 742c3e7ebd8..04e84a71a06 100644 --- a/ax/plot/pareto_utils.py +++ b/ax/plot/pareto_utils.py @@ -510,6 +510,6 @@ def _build_scalarized_optimization_config( minimize=False, ) optimization_config = MultiObjectiveOptimizationConfig( - objective=obj, outcome_constraints=outcome_constraints + objectives=[obj], outcome_constraints=outcome_constraints ) return optimization_config diff --git a/ax/plot/tests/test_pareto_utils.py b/ax/plot/tests/test_pareto_utils.py index 739cd58ca57..b33dd264c02 100644 --- a/ax/plot/tests/test_pareto_utils.py +++ b/ax/plot/tests/test_pareto_utils.py @@ -86,7 +86,7 @@ def test_get_observed_pareto_frontiers(self) -> None: ] objective = MultiObjective(objectives=objectives) optimization_config = MultiObjectiveOptimizationConfig( - objective=objective, + objectives=[objective], # pyre-ignore[6]: ObjectiveThreshold is a subclass of OutcomeConstraint; # list invariance prevents direct assignment. 
objective_thresholds=objective_thresholds, diff --git a/ax/runners/tests/test_map_replay.py b/ax/runners/tests/test_map_replay.py index 5a9b6d8f2a1..5ad3f671449 100644 --- a/ax/runners/tests/test_map_replay.py +++ b/ax/runners/tests/test_map_replay.py @@ -39,10 +39,12 @@ def test_map_replay(self) -> None: name="dummy_experiment", search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - objective=Objective( - metric=metric, - minimize=True, - ) + objectives=[ + Objective( + metric=metric, + minimize=True, + ) + ] ), runner=runner, tracking_metrics=[metric], diff --git a/ax/runners/tests/test_torchx.py b/ax/runners/tests/test_torchx.py index cf369fbe590..0bba5f8fe7f 100644 --- a/ax/runners/tests/test_torchx.py +++ b/ax/runners/tests/test_torchx.py @@ -80,7 +80,7 @@ def test_run_experiment_locally(self) -> None: experiment = Experiment( name="torchx_booth_sequential_demo", search_space=SearchSpace(parameters=self._parameters), - optimization_config=OptimizationConfig(objective=self._objective), + optimization_config=OptimizationConfig(objectives=[self._objective]), runner=self._runner, is_test=True, properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True}, @@ -112,7 +112,7 @@ def test_stop_trials(self) -> None: experiment = Experiment( name="torchx_booth_sequential_demo", search_space=SearchSpace(parameters=self._parameters), - optimization_config=OptimizationConfig(objective=self._objective), + optimization_config=OptimizationConfig(objectives=[self._objective]), runner=self._runner, is_test=True, properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True}, @@ -150,7 +150,7 @@ def test_run_experiment_locally_in_batches(self) -> None: experiment = Experiment( name="torchx_booth_parallel_demo", search_space=SearchSpace(parameters=self._parameters), - optimization_config=OptimizationConfig(objective=self._objective), + optimization_config=OptimizationConfig(objectives=[self._objective]), runner=self._runner, is_test=True, properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True}, @@ -184,7 +184,7 @@ def test_runner_no_batch_trials(self) -> None: experiment = Experiment( name="runner_test", search_space=SearchSpace(parameters=self._parameters), - optimization_config=OptimizationConfig(objective=self._objective), + optimization_config=OptimizationConfig(objectives=[self._objective]), runner=self._runner, is_test=True, tracking_metrics=[self._metric], diff --git a/ax/service/tests/test_ax_client.py b/ax/service/tests/test_ax_client.py index 386f508165b..049ac0edb0c 100644 --- a/ax/service/tests/test_ax_client.py +++ b/ax/service/tests/test_ax_client.py @@ -3394,13 +3394,15 @@ def test_get_optimization_trace_scalarized(self) -> None: ax_client.complete_trial(trial_index=idx, raw_data={"branin": (1.0, 0.0)}) ax_client.experiment.add_tracking_metric(Metric(name="other_metric")) ax_client.experiment._optimization_config = OptimizationConfig( - objective=Objective( - expression="2*branin + -1*other_metric", - metric_name_to_signature={ - "branin": "branin", - "other_metric": "other_metric", - }, - ), + objectives=[ + Objective( + expression="2*branin + -1*other_metric", + metric_name_to_signature={ + "branin": "branin", + "other_metric": "other_metric", + }, + ) + ], ) with self.assertRaisesRegex(UnsupportedError, "not supported for scalarized"): ax_client.get_optimization_trace() @@ -3414,7 +3416,7 @@ def test_get_optimization_trace_scalarized_outcome_constraint(self) -> None: ax_client.experiment.add_tracking_metric(Metric(name="m1")) 
ax_client.experiment.add_tracking_metric(Metric(name="m2")) ax_client.experiment._optimization_config = OptimizationConfig( - objective=Objective(metric=Metric(name="branin"), minimize=True), + objectives=[Objective(metric=Metric(name="branin"), minimize=True)], outcome_constraints=[ OutcomeConstraint( expression="2*m1 + 3*m2 <= 10", diff --git a/ax/service/tests/test_best_point.py b/ax/service/tests/test_best_point.py index 03393107221..e6cb784b6da 100644 --- a/ax/service/tests/test_best_point.py +++ b/ax/service/tests/test_best_point.py @@ -60,12 +60,14 @@ def test_get_trace(self) -> None: self.assertEqual(get_trace(exp), [11, 10, 9, 9, 5]) # Same experiment with maximize via new optimization config. - opt_conf = none_throws(exp.optimization_config).clone() - opt_conf.objective = Objective( - expression=opt_conf.objective.metric_names[0], - metric_name_to_signature={ - opt_conf.objective.metric_names[0]: opt_conf.objective.metric_names[0] - }, + metric_name = none_throws(exp.optimization_config).objective.metric_names[0] + opt_conf = none_throws(exp.optimization_config).clone_with_args( + objectives=[ + Objective( + expression=metric_name, + metric_name_to_signature={metric_name: metric_name}, + ) + ], ) self.assertEqual(get_trace(exp, opt_conf), [11, 11, 11, 15, 15]) @@ -441,12 +443,14 @@ def test_get_best_observed_value(self) -> None: ) self.assertEqual(get_best(exp), 5) # Same experiment with maximize via new optimization config. - opt_conf = none_throws(exp.optimization_config).clone() - opt_conf.objective = Objective( - expression=opt_conf.objective.metric_names[0], - metric_name_to_signature={ - opt_conf.objective.metric_names[0]: opt_conf.objective.metric_names[0] - }, + metric_name = none_throws(exp.optimization_config).objective.metric_names[0] + opt_conf = none_throws(exp.optimization_config).clone_with_args( + objectives=[ + Objective( + expression=metric_name, + metric_name_to_signature={metric_name: metric_name}, + ) + ], ) self.assertEqual(get_best(exp, opt_conf), 15) @@ -820,10 +824,12 @@ def test_get_trace_by_progression_scalarized(self) -> None: """_get_trace_by_progression raises UnsupportedError for scalarized.""" experiment = get_experiment_with_trial() experiment._optimization_config = OptimizationConfig( - objective=Objective( - expression="2*m1 + -1*m2", - metric_name_to_signature={"m1": "m1", "m2": "m2"}, - ), + objectives=[ + Objective( + expression="2*m1 + -1*m2", + metric_name_to_signature={"m1": "m1", "m2": "m2"}, + ) + ], ) with self.assertRaisesRegex(UnsupportedError, "not supported for scalarized"): BestPointMixin._get_trace_by_progression(experiment=experiment) @@ -832,10 +838,12 @@ def test_get_improvement_over_baseline_scalarized(self) -> None: """get_improvement_over_baseline raises UnsupportedError for scalarized.""" experiment = get_experiment_with_trial() experiment._optimization_config = OptimizationConfig( - objective=Objective( - expression="2*m1 + -1*m2", - metric_name_to_signature={"m1": "m1", "m2": "m2"}, - ), + objectives=[ + Objective( + expression="2*m1 + -1*m2", + metric_name_to_signature={"m1": "m1", "m2": "m2"}, + ) + ], ) mixin = BestPointMixin.__new__(BestPointMixin) with self.assertRaisesRegex(UnsupportedError, "not supported for scalarized"): diff --git a/ax/service/tests/test_best_point_utils.py b/ax/service/tests/test_best_point_utils.py index 4101f2fd6d9..326b2e10fbb 100644 --- a/ax/service/tests/test_best_point_utils.py +++ b/ax/service/tests/test_best_point_utils.py @@ -147,7 +147,7 @@ def 
test_get_hypervolume_trace_of_outcomes_multi_objective(self) -> None: ) with self.subTest("Relative objective thresholds not supported"): optimization_config = MultiObjectiveOptimizationConfig( - objective=objective, + objectives=[objective], objective_thresholds=[ ObjectiveThreshold( metric=Metric("m1"), @@ -167,7 +167,7 @@ def test_get_hypervolume_trace_of_outcomes_multi_objective(self) -> None: ) optimization_config = MultiObjectiveOptimizationConfig( - objective=objective, + objectives=[objective], ) with self.subTest("Cumulative HV"): hvs = get_hypervolume_trace_of_outcomes_multi_objective( @@ -199,7 +199,7 @@ def test_get_hypervolume_trace_minimization_inferred_thresholds(self) -> None: ], ) optimization_config = MultiObjectiveOptimizationConfig( - objective=objective, + objectives=[objective], ) df_wide = pd.DataFrame.from_records( [ @@ -223,7 +223,7 @@ def test_get_hypervolume_trace_minimization_inferred_thresholds(self) -> None: def test_get_trace_by_arm_pull_from_data(self) -> None: objective = Objective(metric=Metric("m1"), minimize=False) optimzation_config = OptimizationConfig( - objective=objective, + objectives=[objective], outcome_constraints=[ OutcomeConstraint( metric=Metric("m2"), @@ -322,7 +322,7 @@ def test_get_trace_by_arm_pull_from_data(self) -> None: with self.subTest("Relative optimization config not supported"): rel_optimization_config = OptimizationConfig( - objective=objective, + objectives=[objective], outcome_constraints=[ OutcomeConstraint( metric=Metric("m2"), @@ -356,12 +356,14 @@ def test_get_trace_by_arm_pull_from_data(self) -> None: self.assertEqual(result["value"].tolist(), [1.0, float("-inf"), 3.0]) moo_opt_config = MultiObjectiveOptimizationConfig( - objective=MultiObjective( - objectives=[ - Objective(metric=Metric("m1"), minimize=False), - Objective(metric=Metric("m2"), minimize=False), - ], - ), + objectives=[ + MultiObjective( + objectives=[ + Objective(metric=Metric("m1"), minimize=False), + Objective(metric=Metric("m2"), minimize=False), + ], + ) + ], ) # reference point inferred via infer_reference_point on Pareto front with self.subTest("Multi-objective, cumulative"): @@ -501,7 +503,7 @@ def test_best_raw_objective_point(self) -> None: with self.subTest("Data present but not for needed metrics"): opt_conf = OptimizationConfig( - objective=Objective(metric=get_branin_metric(name="not_branin")) + objectives=[Objective(metric=get_branin_metric(name="not_branin"))] ) with self.assertRaisesRegex( ValueError, "Some metrics are not present for all trials and arms" @@ -615,7 +617,9 @@ def test_best_raw_objective_point_scalarized(self) -> None: exp = get_branin_experiment() gs = choose_generation_strategy_legacy(search_space=exp.search_space) exp.optimization_config = OptimizationConfig( - ScalarizedObjective(metrics=[get_branin_metric()], minimize=True) + objectives=[ + ScalarizedObjective(metrics=[get_branin_metric()], minimize=True) + ], ) with self.assertRaisesRegex(ValueError, "Cannot identify best "): get_best_raw_objective_point_with_trial_index(exp) @@ -637,11 +641,16 @@ def test_best_raw_objective_point_scalarized_multi(self) -> None: exp = get_branin_experiment() gs = choose_generation_strategy_legacy(search_space=exp.search_space) exp.optimization_config = OptimizationConfig( - ScalarizedObjective( - metrics=[get_branin_metric(), get_branin_metric(lower_is_better=False)], - weights=[0.1, -0.9], - minimize=True, - ) + objectives=[ + ScalarizedObjective( + metrics=[ + get_branin_metric(), + get_branin_metric(lower_is_better=False), + ], + 
weights=[0.1, -0.9], + minimize=True, + ) + ], ) with self.assertRaisesRegex(ValueError, "Cannot identify best "): get_best_raw_objective_point_with_trial_index(experiment=exp) @@ -1037,11 +1046,13 @@ def test_best_parameters_from_model_predictions_scalarized(self) -> None: ) exp.add_tracking_metric(metric2) exp.optimization_config = OptimizationConfig( - ScalarizedObjective( - metrics=[metric1, metric2], - weights=[0.5, 0.5], - minimize=True, - ) + objectives=[ + ScalarizedObjective( + metrics=[metric1, metric2], + weights=[0.5, 0.5], + minimize=True, + ) + ], ) # Run trials and generate data @@ -1181,11 +1192,13 @@ def test_get_best_trial_with_scalarized_objective(self) -> None: ) exp.add_tracking_metric(metric2) exp.optimization_config = OptimizationConfig( - objective=ScalarizedObjective( - metrics=[metric1, metric2], - weights=[0.5, 0.5], - minimize=True, - ) + objectives=[ + ScalarizedObjective( + metrics=[metric1, metric2], + weights=[0.5, 0.5], + minimize=True, + ) + ] ) # Run trials and generate data diff --git a/ax/service/tests/test_report_utils.py b/ax/service/tests/test_report_utils.py index 0733f65cd0c..2ca2a60d20c 100644 --- a/ax/service/tests/test_report_utils.py +++ b/ax/service/tests/test_report_utils.py @@ -449,10 +449,12 @@ def _test_get_standard_plots_moo_relative_constraints( names = obj.metric_names # Create a new Objective rather than mutating _expression_str to # avoid stale _parsed cached_property. - none_throws(exp.optimization_config)._objective = Objective( - expression=f"{names[0]}, -{names[1]}", - metric_name_to_signature={n: n for n in names}, - ) + none_throws(exp.optimization_config)._objectives = [ + Objective( + expression=f"{names[0]}, -{names[1]}", + metric_name_to_signature={n: n for n in names}, + ) + ] exp.get_metric(names[0]).lower_is_better = False assert_is_instance( exp.optimization_config, MultiObjectiveOptimizationConfig @@ -494,10 +496,12 @@ def test_get_standard_plots_moo_no_objective_thresholds(self) -> None: # first objective to maximize, second to minimize obj = none_throws(exp.optimization_config).objective names = obj.metric_names - none_throws(exp.optimization_config)._objective = Objective( - expression=f"{names[0]}, -{names[1]}", - metric_name_to_signature={n: n for n in names}, - ) + none_throws(exp.optimization_config)._objectives = [ + Objective( + expression=f"{names[0]}, -{names[1]}", + metric_name_to_signature={n: n for n in names}, + ) + ] exp.trials[0].run() plots = get_standard_plots( experiment=exp, @@ -580,15 +584,17 @@ def test_skip_contour_high_dimensional(self) -> None: def test_get_metric_name_pairs(self) -> None: exp = get_branin_experiment(with_trial=True) exp._optimization_config = MultiObjectiveOptimizationConfig( - objective=MultiObjective( - objectives=[ - Objective(metric=Metric("m0"), minimize=False), - Objective(metric=Metric("m1"), minimize=False), - Objective(metric=Metric("m2"), minimize=False), - Objective(metric=Metric("m3"), minimize=False), - Objective(metric=Metric("m4"), minimize=False), - ] - ) + objectives=[ + MultiObjective( + objectives=[ + Objective(metric=Metric("m0"), minimize=False), + Objective(metric=Metric("m1"), minimize=False), + Objective(metric=Metric("m2"), minimize=False), + Objective(metric=Metric("m3"), minimize=False), + Objective(metric=Metric("m4"), minimize=False), + ] + ) + ] ) with self.assertLogs(logger="ax", level=INFO) as log: metric_name_pairs = _get_metric_name_pairs(experiment=exp) @@ -759,10 +765,12 @@ def test_get_objective_trace_plot_scalarized(self) -> None: exp = 
get_branin_experiment(with_completed_trial=True) exp.add_tracking_metric(get_branin_metric(name="branin2")) exp._optimization_config = OptimizationConfig( - objective=Objective( - expression="2*branin + -1*branin2", - metric_name_to_signature={"branin": "branin", "branin2": "branin2"}, - ), + objectives=[ + Objective( + expression="2*branin + -1*branin2", + metric_name_to_signature={"branin": "branin", "branin2": "branin2"}, + ) + ], ) with self.assertRaisesRegex(UnsupportedError, "not supported for scalarized"): _get_objective_trace_plot(experiment=exp) @@ -775,13 +783,15 @@ def test_maybe_extract_baseline_comparison_values_scalarized(self) -> None: exp.fetch_data() arm_names = list(exp.arms_by_name.keys()) exp._optimization_config = OptimizationConfig( - objective=Objective( - expression="2*branin_a + -1*branin_b", - metric_name_to_signature={ - "branin_a": "branin_a", - "branin_b": "branin_b", - }, - ), + objectives=[ + Objective( + expression="2*branin_a + -1*branin_b", + metric_name_to_signature={ + "branin_a": "branin_a", + "branin_b": "branin_b", + }, + ) + ], ) with self.assertRaisesRegex(UnsupportedError, "not supported for scalarized"): maybe_extract_baseline_comparison_values( diff --git a/ax/service/utils/best_point.py b/ax/service/utils/best_point.py index 0b43001a4a5..387d8398677 100644 --- a/ax/service/utils/best_point.py +++ b/ax/service/utils/best_point.py @@ -761,12 +761,14 @@ def get_hypervolume_trace_of_outcomes_multi_objective( Example: >>> optimization_config = MultiObjectiveOptimizationConfig( - ... objective=MultiObjective( - ... objectives=[ - ... Objective(metric=Metric(name="m1"), minimize=False), - ... Objective(metric=Metric(name="m2"), minimize=False), - ... ] - ... ), + ... objectives=[ + ... MultiObjective( + ... objectives=[ + ... Objective(metric=Metric(name="m1"), minimize=False), + ... Objective(metric=Metric(name="m2"), minimize=False), + ... ] + ... ) + ... ], ... objective_thresholds=[ ... ObjectiveThreshold( ... metric=Metric(name="m1"), diff --git a/ax/service/utils/instantiation.py b/ax/service/utils/instantiation.py index 0b9f017cf76..d4ea393cae3 100644 --- a/ax/service/utils/instantiation.py +++ b/ax/service/utils/instantiation.py @@ -626,7 +626,7 @@ def optimization_config_from_objectives( "thresholds." ) return OptimizationConfig( - objective=objectives[0], + objectives=[objectives[0]], outcome_constraints=outcome_constraints, ) @@ -638,7 +638,7 @@ def optimization_config_from_objectives( ) return MultiObjectiveOptimizationConfig( - objective=MultiObjective(objectives=objectives), + objectives=[MultiObjective(objectives=objectives)], outcome_constraints=outcome_constraints, objective_thresholds=objective_thresholds, ) diff --git a/ax/storage/json_store/decoder.py b/ax/storage/json_store/decoder.py index 4259a071c17..85a3f7b5a62 100644 --- a/ax/storage/json_store/decoder.py +++ b/ax/storage/json_store/decoder.py @@ -351,6 +351,13 @@ def object_from_json( object_json = _sanitize_inputs_to_surrogate_spec(object_json=object_json) if isclass(_class) and issubclass(_class, OptimizationConfig): object_json.pop("risk_measure", None) # Deprecated. + # Backward compat: old JSON uses "objective", new uses "objectives". 
+ if ( + _class is OptimizationConfig + and "objective" in object_json + and "objectives" not in object_json + ): + object_json["objectives"] = [object_json.pop("objective")] return ax_class_from_json_dict( _class=_class, object_json=object_json, **vars(registry_kwargs) ) diff --git a/ax/storage/json_store/encoders.py b/ax/storage/json_store/encoders.py index d6f960ffdcc..024f04e9bcc 100644 --- a/ax/storage/json_store/encoders.py +++ b/ax/storage/json_store/encoders.py @@ -383,7 +383,7 @@ def optimization_config_to_dict( """Convert Ax optimization config to a dictionary.""" return { "__type": optimization_config.__class__.__name__, - "objective": optimization_config.objective, + "objectives": optimization_config.objectives, "outcome_constraints": optimization_config.outcome_constraints, "pruning_target_parameterization": ( optimization_config.pruning_target_parameterization @@ -782,16 +782,17 @@ def _build_opt_config_dict( will then recursively encode them via ``metric_to_dict``, capturing the full metric type. """ - objective_dict = _build_objective_dict( - objective=opt_config.objective, experiment_metrics=experiment_metrics - ) + objective_dicts = [ + _build_objective_dict(objective=obj, experiment_metrics=experiment_metrics) + for obj in opt_config.objectives + ] constraint_dicts = [ _build_constraint_dict(constraint=c, experiment_metrics=experiment_metrics) for c in opt_config.outcome_constraints ] result: dict[str, Any] = { "__type": opt_config.__class__.__name__, - "objective": objective_dict, + "objectives": objective_dicts, "outcome_constraints": constraint_dicts, "pruning_target_parameterization": opt_config.pruning_target_parameterization, } diff --git a/ax/storage/json_store/tests/test_json_store.py b/ax/storage/json_store/tests/test_json_store.py index 783bfce31d9..de3c8eced65 100644 --- a/ax/storage/json_store/tests/test_json_store.py +++ b/ax/storage/json_store/tests/test_json_store.py @@ -137,6 +137,7 @@ get_metric, get_mll_type, get_model_type, + get_moo_optimization_config, get_multi_objective, get_multi_objective_optimization_config, get_multi_type_experiment, @@ -380,6 +381,7 @@ ("Objective", get_objective), ("ObjectiveThreshold", get_objective_threshold), ("OptimizationConfig", get_optimization_config), + ("OptimizationConfig", get_moo_optimization_config), ("OrEarlyStoppingStrategy", get_or_early_stopping_strategy), ("OrderConstraint", get_order_constraint), ("OutcomeConstraint", get_outcome_constraint), @@ -1653,7 +1655,7 @@ def test_optimization_config_with_pruning_target_json_roundtrip(self) -> None: # Setup: create OptimizationConfig with pruning_target_parameterization pruning_target_parameterization = get_arm() optimization_config = OptimizationConfig( - objective=Objective(metric=Metric("test_metric"), minimize=False), + objectives=[Objective(metric=Metric("test_metric"), minimize=False)], pruning_target_parameterization=pruning_target_parameterization, ) @@ -1693,7 +1695,7 @@ def test_multi_objective_optimization_config_with_pruning_target_json_roundtrip( # pruning_target_parameterization pruning_target_parameterization = get_arm() multi_objective_config = MultiObjectiveOptimizationConfig( - objective=get_multi_objective(), + objectives=[get_multi_objective()], pruning_target_parameterization=pruning_target_parameterization, ) @@ -1810,7 +1812,7 @@ def test_optimization_config_with_none_pruning_target_json_roundtrip(self) -> No # Setup: create OptimizationConfig without # pruning_target_parameterization optimization_config = OptimizationConfig( - 
objective=Objective(metric=Metric("test_metric"), minimize=False), + objectives=[Objective(metric=Metric("test_metric"), minimize=False)], pruning_target_parameterization=None, ) @@ -2355,8 +2357,8 @@ def test_experiment_data_by_trial_bc(self) -> None: # name="test", # search_space=get_branin_search_space(), # optimization_config=OptimizationConfig( - # objective=Objective(metric=Metric(name="a", lower_is_better=True)) - # ), + # objectives=[Objective(metric=Metric(name="a", lower_is_better=True)) + # ]), # tracking_metrics=[Metric(name="b"), Metric(name="c")], # runner=SyntheticRunner(), # ) diff --git a/ax/storage/sqa_store/decoder.py b/ax/storage/sqa_store/decoder.py index 9fb27215f44..bd7b9d8bf5a 100644 --- a/ax/storage/sqa_store/decoder.py +++ b/ax/storage/sqa_store/decoder.py @@ -640,7 +640,7 @@ def opt_config_and_tracking_metrics_from_sqa( register the full metric types (e.g. BraninMetric) rather than plain Metric placeholders. """ - objective = None + objectives: list[Objective] = [] objective_thresholds = [] outcome_constraints = [] tracking_metrics = [] @@ -659,7 +659,7 @@ def opt_config_and_tracking_metrics_from_sqa( result = self.metric_from_sqa(metric_sqa=metric_sqa) if isinstance(result, Objective): - objective = result + objectives.append(result) # Collect metrics from the objective if metric_sqa.intent in ( MetricIntent.MULTI_OBJECTIVE, @@ -729,7 +729,7 @@ def opt_config_and_tracking_metrics_from_sqa( tracking_metrics.append(result) all_metrics.append(raw_metric) - if objective is None: + if not objectives: return None, tracking_metrics, all_metrics if preference_objective_sqa is not None: @@ -737,6 +737,7 @@ def opt_config_and_tracking_metrics_from_sqa( raise SQADecodeError( "PreferenceOptimizationConfig cannot have objective thresholds." 
) + objective = objectives[0] properties = preference_objective_sqa.properties or {} optimization_config = PreferenceOptimizationConfig( objective=assert_is_instance(objective, MultiObjective), @@ -747,18 +748,21 @@ def opt_config_and_tracking_metrics_from_sqa( outcome_constraints=outcome_constraints, pruning_target_parameterization=pruning_target_parameterization, ) - elif objective_thresholds or type(objective) is MultiObjective: + elif objective_thresholds or type(objectives[0]) is MultiObjective: + objective = objectives[0] optimization_config = MultiObjectiveOptimizationConfig( - objective=assert_is_instance( - objective, Union[MultiObjective, ScalarizedObjective] - ), + objectives=[ + assert_is_instance( + objective, Union[MultiObjective, ScalarizedObjective] + ) + ], outcome_constraints=outcome_constraints, objective_thresholds=objective_thresholds, pruning_target_parameterization=pruning_target_parameterization, ) else: optimization_config = OptimizationConfig( - objective=objective, + objectives=objectives, outcome_constraints=outcome_constraints, pruning_target_parameterization=pruning_target_parameterization, ) diff --git a/ax/storage/sqa_store/encoder.py b/ax/storage/sqa_store/encoder.py index 63ab30eaa41..9023eb07d3f 100644 --- a/ax/storage/sqa_store/encoder.py +++ b/ax/storage/sqa_store/encoder.py @@ -839,13 +839,14 @@ def optimization_config_to_sqa( ), experiment_metrics=experiment_metrics, ) + metrics_sqa.append(obj_sqa) else: - obj_sqa = self.objective_to_sqa( - objective=optimization_config.objective, - experiment_metrics=experiment_metrics, - ) - - metrics_sqa.append(obj_sqa) + for obj in optimization_config.objectives: + obj_sqa = self.objective_to_sqa( + objective=obj, + experiment_metrics=experiment_metrics, + ) + metrics_sqa.append(obj_sqa) for constraint in optimization_config.outcome_constraints: constraint_sqa = self.outcome_constraint_to_sqa( outcome_constraint=constraint, diff --git a/ax/storage/sqa_store/tests/test_sqa_store.py b/ax/storage/sqa_store/tests/test_sqa_store.py index 07de5407c20..9c2cd60dfbe 100644 --- a/ax/storage/sqa_store/tests/test_sqa_store.py +++ b/ax/storage/sqa_store/tests/test_sqa_store.py @@ -159,6 +159,7 @@ get_fixed_parameter, get_generator_run, get_model_predictions_per_arm, + get_moo_optimization_config, get_multi_objective_optimization_config, get_multi_type_experiment, get_objective, @@ -1269,9 +1270,10 @@ def test_experiment_objective_updates(self) -> None: # update objective # (should perform update in place) - optimization_config = get_optimization_config() objective = get_objective(minimize=True) - optimization_config.objective = objective + optimization_config = get_optimization_config().clone_with_args( + objectives=[objective] + ) experiment.optimization_config = optimization_config save_experiment(experiment) self.assertEqual( @@ -1281,8 +1283,8 @@ def test_experiment_objective_updates(self) -> None: # replace objective # (old one should become tracking metric) experiment.add_tracking_metric(Metric(name="objective")) - optimization_config.objective = Objective( - metric=Metric(name="objective"), minimize=False + optimization_config = optimization_config.clone_with_args( + objectives=[Objective(metric=Metric(name="objective"), minimize=False)] ) experiment.optimization_config = optimization_config save_experiment(experiment) @@ -1388,7 +1390,7 @@ def test_optimization_config_pruning_target_parameterization_sqa_roundtrip( ) optimization_config = OptimizationConfig( - objective=get_objective(), + objectives=[get_objective()], 
outcome_constraints=[get_outcome_constraint()], pruning_target_parameterization=pruning_target_parameterization, ) @@ -1424,6 +1426,18 @@ def test_optimization_config_pruning_target_parameterization_sqa_roundtrip( ) self.assertEqual(loaded_pruning_target_parameterization.parameters["z"], False) + def test_moo_optimization_config_sqa_roundtrip(self) -> None: + """Test SQA round-trip for OptimizationConfig with multiple objectives.""" + experiment = get_experiment_with_batch_trial() + experiment.add_tracking_metric(Metric(name="m3", lower_is_better=True)) + experiment.optimization_config = get_moo_optimization_config() + save_experiment(experiment) + loaded_experiment = load_experiment(experiment.name) + self.assertEqual(experiment, loaded_experiment) + loaded_oc = none_throws(loaded_experiment.optimization_config) + self.assertEqual(len(loaded_oc.objectives), 2) + self.assertTrue(loaded_oc.is_moo_problem) + def test_multi_objective_optimization_config_pruning_target_sqa_roundtrip( self, ) -> None: @@ -1438,7 +1452,7 @@ def test_multi_objective_optimization_config_pruning_target_sqa_roundtrip( ).clone() multi_objective_config = MultiObjectiveOptimizationConfig( - objective=get_multi_objective_optimization_config().objective, + objectives=[get_multi_objective_optimization_config().objective], pruning_target_parameterization=pruning_target_parameterization, ) # Can't use experiment.clone_with_args, so create new experiment diff --git a/ax/utils/preference/preference_utils.py b/ax/utils/preference/preference_utils.py index e69c2fa6ea6..108fa4b6130 100644 --- a/ax/utils/preference/preference_utils.py +++ b/ax/utils/preference/preference_utils.py @@ -57,7 +57,7 @@ def get_preference_adapter( # in the data. Requires optimization_config to specify which metrics to use. pref_metric = Metric(name=Keys.PAIRWISE_PREFERENCE_QUERY.value) optimization_config = OptimizationConfig( - objective=Objective(metric=pref_metric, minimize=False) + objectives=[Objective(metric=pref_metric, minimize=False)] ) # Register the metric on the experiment if not already present. # This is required for _extract_observation_data filtering in TorchAdapter. 
diff --git a/ax/utils/testing/core_stubs.py b/ax/utils/testing/core_stubs.py index 7c143d45463..ecade1ba2e4 100644 --- a/ax/utils/testing/core_stubs.py +++ b/ax/utils/testing/core_stubs.py @@ -254,7 +254,7 @@ def get_experiment_with_custom_runner_and_metric( outcome_constraints.append(custom_scalarized_constraint) optimization_config = OptimizationConfig( - objective=custom_scalarized_objective, + objectives=[custom_scalarized_objective], outcome_constraints=outcome_constraints, ) else: @@ -566,17 +566,19 @@ def get_branin_experiment_with_timestamp_map_metric( BraninMetric(name="branin_constraint", param_names=["x1", "x2"]) ) optimization_config = MultiObjectiveOptimizationConfig( - objective=MultiObjective(objectives=objectives), + objectives=[MultiObjective(objectives=objectives)], objective_thresholds=objective_thresholds, outcome_constraints=outcome_constraints, ) else: # single objective case optimization_config = OptimizationConfig( - objective=Objective( - metric=local_get_map_metric(name="branin_map"), - minimize=True, - ), + objectives=[ + Objective( + metric=local_get_map_metric(name="branin_map"), + minimize=True, + ) + ], outcome_constraints=outcome_constraints, ) @@ -676,7 +678,7 @@ def get_multi_type_experiment( add_trial_type: bool = True, add_trials: bool = False, num_arms: int = 10 ) -> MultiTypeExperiment: oc = OptimizationConfig( - Objective(metric=BraninMetric("m1", ["x1", "x2"]), minimize=True) + objectives=[Objective(metric=BraninMetric("m1", ["x1", "x2"]), minimize=True)] ) experiment = MultiTypeExperiment( name="test_exp", @@ -721,7 +723,7 @@ def get_factorial_experiment( search_space=get_factorial_search_space(), optimization_config=( OptimizationConfig( - objective=Objective(metric=get_factorial_metric(), minimize=False) + objectives=[Objective(metric=get_factorial_metric(), minimize=False)] ) if has_optimization_config else None @@ -1001,7 +1003,7 @@ def get_experiment_with_scalarized_objective_and_outcome_constraint() -> Experim get_scalarized_outcome_constraint(), ] optimization_config = OptimizationConfig( - objective=objective, outcome_constraints=outcome_constraints + objectives=[objective], outcome_constraints=outcome_constraints ) experiment = Experiment( name="test_experiment_scalarized_objective and outcome constraint", @@ -1110,7 +1112,7 @@ def get_experiment_with_observations( tracking_metrics_from_opt_config = list(metrics) if scalarized: optimization_config = OptimizationConfig( - objective=ScalarizedObjective(metrics) + objectives=[ScalarizedObjective(metrics)] ) if constrained: raise NotImplementedError @@ -1126,9 +1128,11 @@ def get_experiment_with_observations( if constraint_metric is not None: tracking_metrics_from_opt_config.append(constraint_metric) optimization_config = MultiObjectiveOptimizationConfig( - objective=MultiObjective( - objectives=[Objective(metric=metric) for metric in metrics] - ), + objectives=[ + MultiObjective( + objectives=[Objective(metric=metric) for metric in metrics] + ) + ], objective_thresholds=[ ObjectiveThreshold( metric=metrics[i], @@ -1180,10 +1184,10 @@ def get_experiment_with_observations( relative=False, ) optimization_config = OptimizationConfig( - objective=objective, outcome_constraints=[constraint] + objectives=[objective], outcome_constraints=[constraint] ) else: - optimization_config = OptimizationConfig(objective=objective) + optimization_config = OptimizationConfig(objectives=[objective]) else: tracking_metrics_from_opt_config = [] search_space = search_space or 
get_search_space_for_range_values(min=0.0, max=1.0) @@ -1281,13 +1285,15 @@ def get_high_dimensional_branin_experiment( sq_parameters = {f"x{i}": 1.0 if i < 25 else 2.0 for i in range(50)} optimization_config = OptimizationConfig( - objective=Objective( - metric=BraninMetric( - name="objective", - param_names=["x19", "x44"], - ), - minimize=True, - ) + objectives=[ + Objective( + metric=BraninMetric( + name="objective", + param_names=["x19", "x44"], + ), + minimize=True, + ) + ] ) exp = Experiment( @@ -2241,9 +2247,13 @@ def get_many_branin_objective_opt_config( n_objectives: int, ) -> MultiObjectiveOptimizationConfig: return MultiObjectiveOptimizationConfig( - objective=MultiObjective( - objectives=[get_branin_objective(name=f"m{i}") for i in range(n_objectives)] - ) + objectives=[ + MultiObjective( + objectives=[ + get_branin_objective(name=f"m{i}") for i in range(n_objectives) + ] + ) + ] ) @@ -2286,13 +2296,13 @@ def get_optimization_config( [get_outcome_constraint(relative=relative)] if outcome_constraint else [] ) return OptimizationConfig( - objective=objective, outcome_constraints=outcome_constraints + objectives=[objective], outcome_constraints=outcome_constraints ) def get_map_optimization_config() -> OptimizationConfig: objective = get_map_objective() - return OptimizationConfig(objective=objective) + return OptimizationConfig(objectives=[objective]) def get_multi_objective_optimization_config( @@ -2309,17 +2319,28 @@ def get_multi_objective_optimization_config( get_objective_threshold(metric_name="m3", comparison_op=ComparisonOp.LEQ), ] return MultiObjectiveOptimizationConfig( - objective=objective, + objectives=[objective], outcome_constraints=outcome_constraints, objective_thresholds=objective_thresholds, ) +def get_moo_optimization_config() -> OptimizationConfig: + """OptimizationConfig with multiple objectives via objectives= kwarg.""" + sig = {"m1": "m1", "m3": "m3"} + return OptimizationConfig( + objectives=[ + Objective(expression="m1", metric_name_to_signature=sig), + Objective(expression="-m3", metric_name_to_signature=sig), + ], + ) + + def get_optimization_config_no_constraints( minimize: bool = False, ) -> OptimizationConfig: return OptimizationConfig( - objective=Objective(metric=Metric("test_metric"), minimize=minimize) + objectives=[Objective(metric=Metric("test_metric"), minimize=minimize)] ) @@ -2342,7 +2363,7 @@ def get_branin_optimization_config( ) ) return OptimizationConfig( - objective=get_branin_objective(minimize=minimize), + objectives=[get_branin_objective(minimize=minimize)], outcome_constraints=outcome_constraint, ) @@ -2402,7 +2423,7 @@ def get_branin_multi_objective_optimization_config( ) ) return MultiObjectiveOptimizationConfig( - objective=get_branin_multi_objective(num_objectives=num_objectives), + objectives=[get_branin_multi_objective(num_objectives=num_objectives)], objective_thresholds=objective_thresholds, outcome_constraints=outcome_constraints, )
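For reference, a minimal sketch of the list-based constructor call that the hunks above converge on, using only classes and keyword arguments that already appear in this patch; the metric name "m1" is a placeholder, and the old single-objective form is kept in a comment for contrast:

from ax.core.metric import Metric
from ax.core.objective import Objective
from ax.core.optimization_config import OptimizationConfig

# Previously: OptimizationConfig(objective=Objective(metric=Metric("m1"), minimize=True))
# With the objectives= keyword, the single Objective is simply wrapped in a list:
opt_config = OptimizationConfig(
    objectives=[Objective(metric=Metric("m1"), minimize=True)],
)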