From 69664743a30b97b98bb8363529d1761eba430ede Mon Sep 17 00:00:00 2001 From: Sait Cakmak Date: Tue, 7 Apr 2026 13:22:39 -0700 Subject: [PATCH 1/2] Add `objectives: list[Objective]` to OptimizationConfig (#5150) Summary: Part of the Restrict Objective to Single/Scalarized & Simplify OptimizationConfig design (see design doc: https://docs.google.com/document/d/1EGQYmBjiNGtYapXu1RLHEBdA5Yz2c7q17acX3es0yV8/edit). This is Diff 1 of the stack: enables the new `OptimizationConfig(objectives=[...])` construction path without breaking any existing code. Changes: - `OptimizationConfig.__init__` and `clone_with_args` are now keyword-only across `OptimizationConfig`, `MOOC`, and `PreferenceOptimizationConfig`. All positional callers updated. - New kwarg `objectives: list[Objective] | None = None`, mutually exclusive with `objective`, on both `__init__` and `clone_with_args`. - Internally stores `self._objectives: list[Objective]` (both paths). - New `objectives` property returns the list. - `objective` property raises `UnsupportedError` if `len > 1`. - `is_moo_problem` property: True when multiple objectives or legacy multi-objective expression. - `metric_names`, `metric_name_to_signature`, `metric_signatures` aggregate across all objectives + constraints. - `__repr__` always uses `objectives=`. - JSON storage: encoder uses `objectives` key; decoder has backward compat to convert old `objective` key to `objectives` list. - SQA storage: encoder iterates `objectives` to encode each one; decoder collects multiple OBJECTIVE rows and reconstructs `OptimizationConfig(objectives=...)` when `len > 1`. - Validation ensures no duplicate metrics across objectives and no multi-objective expressions in individual list elements. Differential Revision: D99387020 --- ax/adapter/tests/test_torch_adapter.py | 2 +- ax/core/optimization_config.py | 284 +++++++++++++----- ax/core/tests/test_multi_type_experiment.py | 2 +- ax/core/tests/test_optimization_config.py | 109 ++++++- ax/orchestration/tests/test_orchestrator.py | 4 +- ax/service/tests/test_best_point_utils.py | 6 +- ax/service/tests/test_report_utils.py | 20 +- ax/storage/json_store/decoder.py | 7 + ax/storage/json_store/encoders.py | 11 +- .../json_store/tests/test_json_store.py | 2 + ax/storage/sqa_store/decoder.py | 12 +- ax/storage/sqa_store/encoder.py | 13 +- ax/storage/sqa_store/tests/test_sqa_store.py | 13 + ax/utils/testing/core_stubs.py | 13 +- 14 files changed, 390 insertions(+), 108 deletions(-) diff --git a/ax/adapter/tests/test_torch_adapter.py b/ax/adapter/tests/test_torch_adapter.py index 63b5e55195d..eb21d5b2469 100644 --- a/ax/adapter/tests/test_torch_adapter.py +++ b/ax/adapter/tests/test_torch_adapter.py @@ -1205,7 +1205,7 @@ def test_pairwise_preference_generator(self) -> None: surrogate=surrogate, ), optimization_config=OptimizationConfig( - Objective( + objective=Objective( metric=Metric(Keys.PAIRWISE_PREFERENCE_QUERY.value), minimize=False, ) diff --git a/ax/core/optimization_config.py b/ax/core/optimization_config.py index c2289916da1..c1b1694a323 100644 --- a/ax/core/optimization_config.py +++ b/ax/core/optimization_config.py @@ -15,8 +15,9 @@ from ax.core.arm import Arm from ax.core.objective import Objective from ax.core.outcome_constraint import ComparisonOp, OutcomeConstraint -from ax.exceptions.core import UserInputError +from ax.exceptions.core import UnsupportedError, UserInputError from ax.utils.common.base import Base +from pyre_extensions import none_throws TRefPoint = list[OutcomeConstraint] @@ -39,9 +40,12 @@ class 
OptimizationConfig(Base): - """An optimization configuration, which comprises an objective + """An optimization configuration, which comprises one or more objectives and outcome constraints. + For single-objective optimization, pass a single ``objective``. + For multi-objective optimization, pass a list of ``objectives``. + There is no minimum or maximum number of outcome constraints, but an individual metric can have at most two constraints--which is how we represent metrics with both upper and lower bounds. @@ -49,14 +53,20 @@ class OptimizationConfig(Base): def __init__( self, - objective: Objective, + *, + objective: Objective | None = None, + objectives: list[Objective] | None = None, outcome_constraints: list[OutcomeConstraint] | None = None, pruning_target_parameterization: Arm | None = None, ) -> None: """Inits OptimizationConfig. Args: - objective: Metric+direction to use for the optimization. + objective: Metric+direction to use for the optimization. Mutually + exclusive with ``objectives``. + objectives: List of objectives for multi-objective optimization. + Mutually exclusive with ``objective``. Each element must be a + single or scalarized Objective (not multi-objective). outcome_constraints: Constraints on metrics. pruning_target_parameterization: Arm containing the target values for irrelevant parameters. The target values are used to prune irrelevant @@ -70,14 +80,29 @@ def __init__( consideration, and if not, the parameter value will be replaced with the corresponding value in the target arm. """ + if objective is not None and objectives is not None: + raise UserInputError( + "Cannot specify both `objective` and `objectives`. " + "Use `objective` for single-objective optimization or " + "`objectives` for multi-objective optimization." + ) + if objective is None and objectives is None: + raise UserInputError("Must specify either `objective` or `objectives`.") + + if objectives is not None: + if len(objectives) == 0: + raise UserInputError("`objectives` must not be empty.") + self._objectives: list[Objective] = objectives + else: + self._objectives = [none_throws(objective)] + constraints: list[OutcomeConstraint] = ( [] if outcome_constraints is None else outcome_constraints ) self._validate_transformed_optimization_config( - objective=objective, + objectives=self._objectives, outcome_constraints=constraints, ) - self._objective: Objective = objective self._outcome_constraints: list[OutcomeConstraint] = constraints self.pruning_target_parameterization = pruning_target_parameterization @@ -87,13 +112,34 @@ def clone(self) -> Self: def clone_with_args( self, + *, objective: Objective | None = None, + objectives: list[Objective] | None = None, outcome_constraints: None | (list[OutcomeConstraint]) = _NO_OUTCOME_CONSTRAINTS, pruning_target_parameterization: Arm | None = _NO_PRUNING_TARGET_PARAMETERIZATION, ) -> Self: - """Make a copy of this optimization config.""" - objective = self.objective.clone() if objective is None else objective + """Make a copy of this optimization config. + + Args: + objective: Replace with a single objective. Mutually exclusive + with ``objectives``. + objectives: Replace with a list of objectives. Mutually exclusive + with ``objective``. + outcome_constraints: Replace outcome constraints. Pass ``None`` + to clear them. + pruning_target_parameterization: Replace the pruning target. + """ + if objective is not None and objectives is not None: + raise UserInputError( + "Cannot specify both `objective` and `objectives` in clone_with_args." 
+            )
+        if objective is not None:
+            cloned_objectives = [objective]
+        elif objectives is not None:
+            cloned_objectives = objectives
+        else:
+            cloned_objectives = [obj.clone() for obj in self._objectives]
         outcome_constraints = (
             [constraint.clone() for constraint in self.outcome_constraints]
             if outcome_constraints is _NO_OUTCOME_CONSTRAINTS
@@ -106,23 +152,44 @@ def clone_with_args(
         )
 
         return self.__class__(
-            objective=objective,
+            objectives=cloned_objectives,
             outcome_constraints=outcome_constraints,
             pruning_target_parameterization=pruning_target_parameterization,
         )
 
+    @property
+    def objectives(self) -> list[Objective]:
+        """Get the list of objectives.
+
+        For single-objective optimization, this returns a single-element list.
+        For multi-objective optimization, this returns all objectives.
+        """
+        return self._objectives
+
     @property
     def objective(self) -> Objective:
-        """Get objective."""
-        return self._objective
+        """Get the single objective.
+
+        For single-objective or scalarized-objective configs, returns the
+        objective. For multi-objective configs (multiple objectives in the
+        list), raises ``UnsupportedError`` -- use ``objectives`` instead.
+        """
+        if len(self._objectives) > 1:
+            raise UnsupportedError(
+                "This OptimizationConfig has multiple objectives. "
+                "Use `objectives` to access the list of objectives, or "
+                "iterate over individual objectives."
+            )
+        return self._objectives[0]
 
     @objective.setter
     def objective(self, objective: Objective) -> None:
-        """Set objective if not present in outcome constraints."""
+        """Set objective. Only valid for single-objective configs."""
         self._validate_transformed_optimization_config(
-            objective, self.outcome_constraints
+            objectives=[objective],
+            outcome_constraints=self.outcome_constraints,
         )
-        self._objective = objective
+        self._objectives = [objective]
 
     @property
     def all_constraints(self) -> list[OutcomeConstraint]:
@@ -136,17 +203,26 @@ def outcome_constraints(self) -> list[OutcomeConstraint]:
 
     @property
     def objective_thresholds(self) -> list[OutcomeConstraint]:
-        """Get objective thresholds."""
+        """Get objective thresholds.
+
+        Returns outcome constraints whose primary metric is an objective
+        metric.
+        """
+        all_obj_metric_names: set[str] = set()
+        for obj in self._objectives:
+            all_obj_metric_names.update(obj.metric_names)
         return [
             threshold
             for threshold in self.outcome_constraints
-            if threshold.metric_names[0] in self.objective.metric_names
+            if threshold.metric_names[0] in all_obj_metric_names
         ]
 
     @property
     def metric_names(self) -> set[str]:
-        """All metric names referenced by the objective and constraints."""
-        names: set[str] = set(self.objective.metric_names)
+        """All metric names referenced by the objectives and constraints."""
+        names: set[str] = set()
+        for obj in self._objectives:
+            names.update(obj.metric_names)
         for oc in self.all_constraints:
             names.update(oc.metric_names)
         return names
@@ -154,10 +230,11 @@ def metric_names(self) -> set[str]:
     @property
     def metric_name_to_signature(self) -> dict[str, str]:
         """Aggregated mapping from all metric names to their canonical
-        signatures, across the objective and all constraints.
+        signatures, across all objectives and all constraints.
""" mapping: dict[str, str] = {} - mapping.update(self.objective.metric_name_to_signature) + for obj in self._objectives: + mapping.update(obj.metric_name_to_signature) for constraint in self.all_constraints: mapping.update(constraint.metric_name_to_signature) return mapping @@ -165,22 +242,30 @@ def metric_name_to_signature(self) -> dict[str, str]: def update_metric_name_to_signature_mapping( self, mapping: Mapping[str, str] ) -> None: - """Set the metric name to signature mapping on the objective and all + """Set the metric name to signature mapping on all objectives and constraints. """ - self.objective.update_metric_name_to_signature_mapping(mapping) + for obj in self._objectives: + obj.update_metric_name_to_signature_mapping(mapping) for constraint in self.all_constraints: constraint.update_metric_name_to_signature_mapping(mapping) @property def metric_signatures(self) -> set[str]: - """All metric signatures referenced by the objective and constraints.""" + """All metric signatures referenced by the objectives and constraints.""" mapping = self.metric_name_to_signature return {mapping[name] for name in self.metric_names} @property def is_moo_problem(self) -> bool: - return self.objective is not None and self.objective.is_multi_objective + """Whether this is a multi-objective optimization problem. + + True when there are multiple objectives in the list, or when a single + objective is a (legacy) multi-objective expression. + """ + if len(self._objectives) > 1: + return True + return self._objectives[0].is_multi_objective @property def is_bope_problem(self) -> bool: @@ -195,37 +280,53 @@ def is_bope_problem(self) -> bool: @outcome_constraints.setter def outcome_constraints(self, outcome_constraints: list[OutcomeConstraint]) -> None: """Set outcome constraints if valid, else raise.""" - self._validate_transformed_optimization_config( - objective=self.objective, + unconstrainable: list[str] = [] + for obj in self._objectives: + unconstrainable.extend(obj.get_unconstrainable_metric_names()) + self._validate_outcome_constraints( + unconstrainable_metric_names=unconstrainable, outcome_constraints=outcome_constraints, ) self._outcome_constraints = outcome_constraints @staticmethod def _validate_transformed_optimization_config( - objective: Objective, + objectives: list[Objective], outcome_constraints: list[OutcomeConstraint] | None = None, ) -> None: - """Ensure outcome constraints are valid. + """Validate objectives and outcome constraints. - Either one or two outcome constraints can reference one metric. - If there are two constraints, they must have different 'ops': one - LEQ and one GEQ. - If there are two constraints, the bound of the GEQ op must be less - than the bound of the LEQ op. + Ensures no multi-objective expressions in individual objectives, + no duplicate metrics across objectives, outcome constraints don't + constrain objective metrics, and that constraint pairs on the + same metric are valid. + + Subclasses (e.g. ``MultiObjectiveOptimizationConfig``) override + this to allow multi-objective expressions. Args: - objective: Metric+direction to use for the optimization. + objectives: List of objectives to validate. outcome_constraints: Constraints to validate. """ - if objective.is_multi_objective: - # Raise error on multi-objective; `ScalarizedObjective` is OK - raise ValueError( - "OptimizationConfig does not support MultiObjective. " - "Use MultiObjectiveOptimizationConfig instead." 
- ) + all_metric_names: list[str] = [] + for obj in objectives: + if obj.is_multi_objective: + raise ValueError( + "Each objective in `objectives` must be a single or " + "scalarized objective, not a multi-objective. " + "Pass each sub-objective as a separate list element." + ) + for name in obj.metric_names: + if name in all_metric_names: + raise UserInputError( + f"Metric '{name}' appears in multiple objectives. " + "Each metric can only appear in one objective." + ) + all_metric_names.append(name) outcome_constraints = outcome_constraints or [] - unconstrainable_metric_names = objective.get_unconstrainable_metric_names() + unconstrainable_metric_names: list[str] = [] + for obj in objectives: + unconstrainable_metric_names.extend(obj.get_unconstrainable_metric_names()) OptimizationConfig._validate_outcome_constraints( unconstrainable_metric_names=unconstrainable_metric_names, outcome_constraints=outcome_constraints, @@ -274,7 +375,7 @@ def constraint_key(oc: OutcomeConstraint) -> str: def __repr__(self) -> str: return ( f"{self.__class__.__name__}(" - "objective=" + repr(self.objective) + ", " + "objectives=" + repr(self._objectives) + ", " "outcome_constraints=" + repr(self.outcome_constraints) + ")" ) @@ -299,7 +400,9 @@ class MultiObjectiveOptimizationConfig(OptimizationConfig): def __init__( self, - objective: Objective, + *, + objective: Objective | None = None, + objectives: list[Objective] | None = None, outcome_constraints: list[OutcomeConstraint] | None = None, objective_thresholds: list[OutcomeConstraint] | None = None, pruning_target_parameterization: Arm | None = None, @@ -308,9 +411,12 @@ def __init__( Args: objective: Metric+direction to use for the optimization. Should be either a - MultiObjective or a ScalarizedObjective. + MultiObjective or a ScalarizedObjective. Mutually exclusive with + ``objectives``. + objectives: List containing the objective. Mutually exclusive + with ``objective``. outcome_constraints: Constraints on metrics. - objective_thesholds: Thresholds objectives must exceed. Used for + objective_thresholds: Thresholds objectives must exceed. Used for multi-objective optimization and for calculating frontiers and hypervolumes. pruning_target_parameterization: Arm containing the target values for @@ -325,24 +431,26 @@ def __init__( consideration, and if not, the parameter value will be replaced with the corresponding value in the target arm. """ - constraints: list[OutcomeConstraint] = ( - [] if outcome_constraints is None else outcome_constraints + super().__init__( + objective=objective, + objectives=objectives, + outcome_constraints=outcome_constraints, + pruning_target_parameterization=pruning_target_parameterization, ) - objective_thresholds = objective_thresholds or [] + # Validate and set objective thresholds (MOOC-specific). + self._objective_thresholds: list[OutcomeConstraint] = objective_thresholds or [] self._validate_transformed_optimization_config( - objective=objective, - outcome_constraints=constraints, - objective_thresholds=objective_thresholds, + objectives=self._objectives, + outcome_constraints=self._outcome_constraints, + objective_thresholds=self._objective_thresholds, ) - self._objective: Objective = objective - self._outcome_constraints: list[OutcomeConstraint] = constraints - self._objective_thresholds: list[OutcomeConstraint] = objective_thresholds - self.pruning_target_parameterization = pruning_target_parameterization # pyre-fixme[14]: Inconsistent override. 
def clone_with_args( self, + *, objective: Objective | None = None, + objectives: list[Objective] | None = None, outcome_constraints: None | (list[OutcomeConstraint]) = _NO_OUTCOME_CONSTRAINTS, objective_thresholds: None | (list[OutcomeConstraint]) = _NO_OBJECTIVE_THRESHOLDS, @@ -350,7 +458,16 @@ def clone_with_args( | None = _NO_PRUNING_TARGET_PARAMETERIZATION, ) -> "MultiObjectiveOptimizationConfig": """Make a copy of this optimization config.""" - objective = self.objective.clone() if objective is None else objective + if objective is not None and objectives is not None: + raise UserInputError( + "Cannot specify both `objective` and `objectives` in clone_with_args." + ) + if objective is not None: + cloned_objectives = [objective] + elif objectives is not None: + cloned_objectives = objectives + else: + cloned_objectives = [obj.clone() for obj in self._objectives] outcome_constraints = ( [constraint.clone() for constraint in self.outcome_constraints] if outcome_constraints is _NO_OUTCOME_CONSTRAINTS @@ -367,7 +484,7 @@ def clone_with_args( else pruning_target_parameterization ) return MultiObjectiveOptimizationConfig( - objective=objective, + objectives=cloned_objectives, outcome_constraints=outcome_constraints, objective_thresholds=objective_thresholds, pruning_target_parameterization=pruning_target_parameterization, @@ -375,18 +492,18 @@ def clone_with_args( @property def objective(self) -> Objective: - """Get objective.""" - return self._objective + """Get the (multi-)objective.""" + return self._objectives[0] @objective.setter def objective(self, objective: Objective) -> None: """Set objective if not present in outcome constraints.""" self._validate_transformed_optimization_config( - objective=objective, + objectives=[objective], outcome_constraints=self.outcome_constraints, objective_thresholds=self.objective_thresholds, ) - self._objective = objective + self._objectives = [objective] @property def all_constraints(self) -> list[OutcomeConstraint]: @@ -404,7 +521,7 @@ def objective_thresholds( ) -> None: """Set outcome constraints if valid, else raise.""" self._validate_transformed_optimization_config( - objective=self.objective, + objectives=self._objectives, objective_thresholds=objective_thresholds, ) self._objective_thresholds = objective_thresholds @@ -418,7 +535,7 @@ def objective_thresholds_dict(self) -> dict[str, OutcomeConstraint]: @staticmethod def _validate_transformed_optimization_config( - objective: Objective, + objectives: list[Objective], outcome_constraints: list[OutcomeConstraint] | None = None, objective_thresholds: list[OutcomeConstraint] | None = None, ) -> None: @@ -431,10 +548,11 @@ def _validate_transformed_optimization_config( than the bound of the LEQ op. Args: - objective: Metric+direction to use for the optimization. + objectives: List of objectives to validate. outcome_constraints: Constraints to validate. objective_thresholds: Thresholds objectives must exceed. 
""" + objective = objectives[0] if not (objective.is_multi_objective or objective.is_scalarized_objective): raise TypeError( "`MultiObjectiveOptimizationConfig` requires an objective " @@ -514,7 +632,9 @@ def check_objective_thresholds_match_objectives( class PreferenceOptimizationConfig(MultiObjectiveOptimizationConfig): def __init__( self, - objective: Objective, + *, + objective: Objective | None = None, + objectives: list[Objective] | None = None, preference_profile_name: str, outcome_constraints: list[OutcomeConstraint] | None = None, expect_relativized_outcomes: bool = False, @@ -524,7 +644,9 @@ def __init__( Args: objective: Metric+direction to use for the optimization. Should be a - MultiObjective. + MultiObjective. Mutually exclusive with ``objectives``. + objectives: List containing the objective. Mutually exclusive + with ``objective``. preference_profile_name: The name of the auxiliary experiment to use as the preference profile for the experiment. An auxiliary experiment with this name and purpose PE_EXPERIMENT should be attached to @@ -548,12 +670,6 @@ def __init__( consideration, and if not, the parameter value will be replaced with the corresponding value in the target arm. """ - if not objective.is_multi_objective: - raise TypeError( - "`PreferenceOptimizationConfig` requires a multi-objective. " - "Use `OptimizationConfig` instead if using a " - "single-metric objective." - ) if outcome_constraints: raise NotImplementedError( "Outcome constraints are not yet supported in " @@ -563,10 +679,19 @@ def __init__( # Call parent's __init__ with objective_thresholds=None super().__init__( objective=objective, + objectives=objectives, outcome_constraints=outcome_constraints, objective_thresholds=None, pruning_target_parameterization=pruning_target_parameterization, ) + # Validate that the objective is multi-objective (after super sets + # self._objectives). + if not self._objectives[0].is_multi_objective: + raise TypeError( + "`PreferenceOptimizationConfig` requires a multi-objective. " + "Use `OptimizationConfig` instead if using a " + "single-metric objective." + ) self.preference_profile_name = preference_profile_name self.expect_relativized_outcomes = expect_relativized_outcomes @@ -583,7 +708,9 @@ def is_bope_problem(self) -> bool: # pyre-ignore[14]: Inconsistent override. def clone_with_args( self, + *, objective: Objective | None = None, + objectives: list[Objective] | None = None, preference_profile_name: str | None = None, outcome_constraints: list[OutcomeConstraint] | None = _NO_OUTCOME_CONSTRAINTS, expect_relativized_outcomes: bool | None = None, @@ -591,7 +718,16 @@ def clone_with_args( | None = _NO_PRUNING_TARGET_PARAMETERIZATION, ) -> PreferenceOptimizationConfig: """Make a copy of this optimization config.""" - objective = self.objective.clone() if objective is None else objective + if objective is not None and objectives is not None: + raise UserInputError( + "Cannot specify both `objective` and `objectives` in clone_with_args." 
+ ) + if objective is not None: + cloned_objectives = [objective] + elif objectives is not None: + cloned_objectives = objectives + else: + cloned_objectives = [obj.clone() for obj in self._objectives] preference_profile_name = ( self.preference_profile_name @@ -615,7 +751,7 @@ def clone_with_args( ) return PreferenceOptimizationConfig( - objective=objective, + objectives=cloned_objectives, preference_profile_name=preference_profile_name, outcome_constraints=outcome_constraints, expect_relativized_outcomes=expect_relativized_outcomes, diff --git a/ax/core/tests/test_multi_type_experiment.py b/ax/core/tests/test_multi_type_experiment.py index b314cfd5b75..7775cea3f5b 100644 --- a/ax/core/tests/test_multi_type_experiment.py +++ b/ax/core/tests/test_multi_type_experiment.py @@ -154,7 +154,7 @@ def test_setting_opt_config(self) -> None: m3 = BraninMetric("m3", ["x1", "x2"]) self.experiment.add_tracking_metric(m3) self.experiment.optimization_config = OptimizationConfig( - Objective(metric=m3, minimize=True) + objective=Objective(metric=m3, minimize=True) ) self.assertDictEqual( self.experiment._metric_to_trial_type, diff --git a/ax/core/tests/test_optimization_config.py b/ax/core/tests/test_optimization_config.py index 29f253998d0..c8b5c713883 100644 --- a/ax/core/tests/test_optimization_config.py +++ b/ax/core/tests/test_optimization_config.py @@ -21,14 +21,14 @@ ScalarizedOutcomeConstraint, ) from ax.core.types import ComparisonOp -from ax.exceptions.core import UserInputError +from ax.exceptions.core import UnsupportedError, UserInputError from ax.utils.common.testutils import TestCase from pyre_extensions import assert_is_instance OC_STR = ( "OptimizationConfig(" - 'objective=Objective(expression="m1"), ' + 'objectives=[Objective(expression="m1")], ' "outcome_constraints=[OutcomeConstraint(m3 >= -0.25), " "OutcomeConstraint(m4 <= 0.25), " "ScalarizedOutcomeConstraint(0.5*m3 + 0.5*m4 >= 0.9975 * baseline)])" @@ -271,6 +271,111 @@ def test_CloneWithArgs(self) -> None: ) +class OptimizationConfigObjectivesListTest(TestCase): + """Tests for the new `OptimizationConfig(objectives=[...])` construction path.""" + + def setUp(self) -> None: + super().setUp() + self.metrics = { + "m1": Metric(name="m1"), + "m2": Metric(name="m2"), + "m3": Metric(name="m3"), + } + self.sig = {m: m for m in self.metrics} + self.obj1 = Objective(expression="m1", metric_name_to_signature=self.sig) + self.obj2 = Objective(expression="-m2", metric_name_to_signature=self.sig) + self.scalarized_obj = Objective( + expression="2*m1 + m2", metric_name_to_signature=self.sig + ) + + def test_objectives_kwarg_construction(self) -> None: + """Test single and multi-objective construction via objectives kwarg.""" + # Single objective + config = OptimizationConfig(objectives=[self.obj1]) + self.assertEqual(config.objectives, [self.obj1]) + self.assertEqual(config.objective, self.obj1) + self.assertFalse(config.is_moo_problem) + + # Multi-objective + config = OptimizationConfig(objectives=[self.obj1, self.obj2]) + self.assertEqual(config.objectives, [self.obj1, self.obj2]) + self.assertTrue(config.is_moo_problem) + with self.assertRaisesRegex(UnsupportedError, "multiple objectives"): + config.objective + + def test_objectives_kwarg_metric_aggregation(self) -> None: + """Test metric_names, metric_name_to_signature, metric_signatures.""" + constraint = OutcomeConstraint( + expression="m3 >= 0.5", metric_name_to_signature=self.sig + ) + config = OptimizationConfig( + objectives=[self.obj1, self.obj2], + outcome_constraints=[constraint], 
+ ) + self.assertEqual(config.metric_names, {"m1", "m2", "m3"}) + self.assertEqual( + config.metric_name_to_signature, {"m1": "m1", "m2": "m2", "m3": "m3"} + ) + self.assertEqual(config.metric_signatures, {"m1", "m2", "m3"}) + + def test_objectives_kwarg_validation(self) -> None: + """Test validation errors for objectives kwarg.""" + with self.subTest("mutual_exclusivity"): + with self.assertRaisesRegex(UserInputError, "Cannot specify both"): + OptimizationConfig(objective=self.obj1, objectives=[self.obj1]) + + with self.subTest("neither_specified"): + with self.assertRaisesRegex(UserInputError, "Must specify either"): + OptimizationConfig() + + with self.subTest("empty_list"): + with self.assertRaisesRegex(UserInputError, "must not be empty"): + OptimizationConfig(objectives=[]) + + with self.subTest("multi_objective_expression"): + multi_obj = Objective( + expression="m1, -m2", metric_name_to_signature=self.sig + ) + with self.assertRaisesRegex(ValueError, "single or scalarized"): + OptimizationConfig(objectives=[multi_obj]) + + with self.subTest("duplicate_metric_names"): + obj_dup = Objective(expression="m1", metric_name_to_signature=self.sig) + with self.assertRaisesRegex(UserInputError, "appears in multiple"): + OptimizationConfig(objectives=[self.obj1, obj_dup]) + + def test_objectives_kwarg_clone_and_repr(self) -> None: + """Test clone, clone_with_args, and repr for objectives-list configs.""" + config = OptimizationConfig(objectives=[self.obj1, self.obj2]) + + # clone preserves objectives + cloned = config.clone() + self.assertEqual(len(cloned.objectives), 2) + self.assertEqual(cloned.objectives[0].expression, "m1") + self.assertEqual(cloned.objectives[1].expression, "-m2") + self.assertTrue(cloned.is_moo_problem) + + # clone_with_args(objective=) replaces the list with a single objective + cloned = config.clone_with_args(objective=self.obj1) + self.assertEqual(len(cloned.objectives), 1) + self.assertFalse(cloned.is_moo_problem) + + # clone_with_args(objectives=) replaces the list + obj3 = Objective(expression="m3", metric_name_to_signature=self.sig) + cloned = config.clone_with_args(objectives=[self.obj1, obj3]) + self.assertEqual(len(cloned.objectives), 2) + self.assertEqual(cloned.objectives[1].expression, "m3") + + # objective= and objectives= are mutually exclusive in clone_with_args + with self.assertRaisesRegex(UserInputError, "Cannot specify both"): + config.clone_with_args(objective=self.obj1, objectives=[self.obj1]) + + # repr always uses "objectives=" + self.assertIn("objectives=", repr(config)) + single_config = OptimizationConfig(objectives=[self.obj1]) + self.assertIn("objectives=", repr(single_config)) + + class MultiObjectiveOptimizationConfigTest(TestCase): def setUp(self) -> None: super().setUp() diff --git a/ax/orchestration/tests/test_orchestrator.py b/ax/orchestration/tests/test_orchestrator.py index 023f970fca6..ab9bbcc17b8 100644 --- a/ax/orchestration/tests/test_orchestrator.py +++ b/ax/orchestration/tests/test_orchestrator.py @@ -2727,7 +2727,7 @@ def test_generate_candidates_does_not_generate_if_missing_data(self) -> None: ) self.branin_experiment.add_tracking_metric(custom_metric) self.branin_experiment.optimization_config = OptimizationConfig( - Objective( + objective=Objective( metric=CustomTestMetric( name="custom_test_metric", test_attribute="test" ), @@ -2974,7 +2974,7 @@ def setUp(self) -> None: self.branin_experiment_no_impl_runner_or_metrics = MultiTypeExperiment( search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - 
Objective(metric=Metric(name="branin"), minimize=True) + objective=Objective(metric=Metric(name="branin"), minimize=True) ), default_trial_type="type1", default_runner=None, diff --git a/ax/service/tests/test_best_point_utils.py b/ax/service/tests/test_best_point_utils.py index 4101f2fd6d9..dc597cd2006 100644 --- a/ax/service/tests/test_best_point_utils.py +++ b/ax/service/tests/test_best_point_utils.py @@ -615,7 +615,7 @@ def test_best_raw_objective_point_scalarized(self) -> None: exp = get_branin_experiment() gs = choose_generation_strategy_legacy(search_space=exp.search_space) exp.optimization_config = OptimizationConfig( - ScalarizedObjective(metrics=[get_branin_metric()], minimize=True) + objective=ScalarizedObjective(metrics=[get_branin_metric()], minimize=True) ) with self.assertRaisesRegex(ValueError, "Cannot identify best "): get_best_raw_objective_point_with_trial_index(exp) @@ -637,7 +637,7 @@ def test_best_raw_objective_point_scalarized_multi(self) -> None: exp = get_branin_experiment() gs = choose_generation_strategy_legacy(search_space=exp.search_space) exp.optimization_config = OptimizationConfig( - ScalarizedObjective( + objective=ScalarizedObjective( metrics=[get_branin_metric(), get_branin_metric(lower_is_better=False)], weights=[0.1, -0.9], minimize=True, @@ -1037,7 +1037,7 @@ def test_best_parameters_from_model_predictions_scalarized(self) -> None: ) exp.add_tracking_metric(metric2) exp.optimization_config = OptimizationConfig( - ScalarizedObjective( + objective=ScalarizedObjective( metrics=[metric1, metric2], weights=[0.5, 0.5], minimize=True, diff --git a/ax/service/tests/test_report_utils.py b/ax/service/tests/test_report_utils.py index 0733f65cd0c..12546f0bcbd 100644 --- a/ax/service/tests/test_report_utils.py +++ b/ax/service/tests/test_report_utils.py @@ -449,10 +449,12 @@ def _test_get_standard_plots_moo_relative_constraints( names = obj.metric_names # Create a new Objective rather than mutating _expression_str to # avoid stale _parsed cached_property. - none_throws(exp.optimization_config)._objective = Objective( - expression=f"{names[0]}, -{names[1]}", - metric_name_to_signature={n: n for n in names}, - ) + none_throws(exp.optimization_config)._objectives = [ + Objective( + expression=f"{names[0]}, -{names[1]}", + metric_name_to_signature={n: n for n in names}, + ) + ] exp.get_metric(names[0]).lower_is_better = False assert_is_instance( exp.optimization_config, MultiObjectiveOptimizationConfig @@ -494,10 +496,12 @@ def test_get_standard_plots_moo_no_objective_thresholds(self) -> None: # first objective to maximize, second to minimize obj = none_throws(exp.optimization_config).objective names = obj.metric_names - none_throws(exp.optimization_config)._objective = Objective( - expression=f"{names[0]}, -{names[1]}", - metric_name_to_signature={n: n for n in names}, - ) + none_throws(exp.optimization_config)._objectives = [ + Objective( + expression=f"{names[0]}, -{names[1]}", + metric_name_to_signature={n: n for n in names}, + ) + ] exp.trials[0].run() plots = get_standard_plots( experiment=exp, diff --git a/ax/storage/json_store/decoder.py b/ax/storage/json_store/decoder.py index 4259a071c17..85a3f7b5a62 100644 --- a/ax/storage/json_store/decoder.py +++ b/ax/storage/json_store/decoder.py @@ -351,6 +351,13 @@ def object_from_json( object_json = _sanitize_inputs_to_surrogate_spec(object_json=object_json) if isclass(_class) and issubclass(_class, OptimizationConfig): object_json.pop("risk_measure", None) # Deprecated. 
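The backward-compat shim that follows is easiest to see with concrete payloads. A minimal sketch of the two JSON shapes (key names for the nested objective are illustrative; real payloads nest full metric encodings):

    # Illustrative only: nested encodings abbreviated.
    obj_json = {"__type": "Objective", "expression": "m1"}
    # Legacy JSON written by the old encoder: a single "objective" key.
    old_json = {"__type": "OptimizationConfig", "objective": obj_json}
    # The shim below pops the legacy key and wraps it in a one-element
    # list, matching the shape the new encoder writes:
    new_json = {"__type": "OptimizationConfig", "objectives": [obj_json]}
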
+ # Backward compat: old JSON uses "objective", new uses "objectives". + if ( + _class is OptimizationConfig + and "objective" in object_json + and "objectives" not in object_json + ): + object_json["objectives"] = [object_json.pop("objective")] return ax_class_from_json_dict( _class=_class, object_json=object_json, **vars(registry_kwargs) ) diff --git a/ax/storage/json_store/encoders.py b/ax/storage/json_store/encoders.py index d6f960ffdcc..024f04e9bcc 100644 --- a/ax/storage/json_store/encoders.py +++ b/ax/storage/json_store/encoders.py @@ -383,7 +383,7 @@ def optimization_config_to_dict( """Convert Ax optimization config to a dictionary.""" return { "__type": optimization_config.__class__.__name__, - "objective": optimization_config.objective, + "objectives": optimization_config.objectives, "outcome_constraints": optimization_config.outcome_constraints, "pruning_target_parameterization": ( optimization_config.pruning_target_parameterization @@ -782,16 +782,17 @@ def _build_opt_config_dict( will then recursively encode them via ``metric_to_dict``, capturing the full metric type. """ - objective_dict = _build_objective_dict( - objective=opt_config.objective, experiment_metrics=experiment_metrics - ) + objective_dicts = [ + _build_objective_dict(objective=obj, experiment_metrics=experiment_metrics) + for obj in opt_config.objectives + ] constraint_dicts = [ _build_constraint_dict(constraint=c, experiment_metrics=experiment_metrics) for c in opt_config.outcome_constraints ] result: dict[str, Any] = { "__type": opt_config.__class__.__name__, - "objective": objective_dict, + "objectives": objective_dicts, "outcome_constraints": constraint_dicts, "pruning_target_parameterization": opt_config.pruning_target_parameterization, } diff --git a/ax/storage/json_store/tests/test_json_store.py b/ax/storage/json_store/tests/test_json_store.py index 783bfce31d9..207fb838055 100644 --- a/ax/storage/json_store/tests/test_json_store.py +++ b/ax/storage/json_store/tests/test_json_store.py @@ -137,6 +137,7 @@ get_metric, get_mll_type, get_model_type, + get_moo_optimization_config, get_multi_objective, get_multi_objective_optimization_config, get_multi_type_experiment, @@ -380,6 +381,7 @@ ("Objective", get_objective), ("ObjectiveThreshold", get_objective_threshold), ("OptimizationConfig", get_optimization_config), + ("OptimizationConfig", get_moo_optimization_config), ("OrEarlyStoppingStrategy", get_or_early_stopping_strategy), ("OrderConstraint", get_order_constraint), ("OutcomeConstraint", get_outcome_constraint), diff --git a/ax/storage/sqa_store/decoder.py b/ax/storage/sqa_store/decoder.py index 9fb27215f44..d9754599675 100644 --- a/ax/storage/sqa_store/decoder.py +++ b/ax/storage/sqa_store/decoder.py @@ -640,7 +640,7 @@ def opt_config_and_tracking_metrics_from_sqa( register the full metric types (e.g. BraninMetric) rather than plain Metric placeholders. 
""" - objective = None + objectives: list[Objective] = [] objective_thresholds = [] outcome_constraints = [] tracking_metrics = [] @@ -659,7 +659,7 @@ def opt_config_and_tracking_metrics_from_sqa( result = self.metric_from_sqa(metric_sqa=metric_sqa) if isinstance(result, Objective): - objective = result + objectives.append(result) # Collect metrics from the objective if metric_sqa.intent in ( MetricIntent.MULTI_OBJECTIVE, @@ -729,7 +729,7 @@ def opt_config_and_tracking_metrics_from_sqa( tracking_metrics.append(result) all_metrics.append(raw_metric) - if objective is None: + if not objectives: return None, tracking_metrics, all_metrics if preference_objective_sqa is not None: @@ -737,6 +737,7 @@ def opt_config_and_tracking_metrics_from_sqa( raise SQADecodeError( "PreferenceOptimizationConfig cannot have objective thresholds." ) + objective = objectives[0] properties = preference_objective_sqa.properties or {} optimization_config = PreferenceOptimizationConfig( objective=assert_is_instance(objective, MultiObjective), @@ -747,7 +748,8 @@ def opt_config_and_tracking_metrics_from_sqa( outcome_constraints=outcome_constraints, pruning_target_parameterization=pruning_target_parameterization, ) - elif objective_thresholds or type(objective) is MultiObjective: + elif objective_thresholds or type(objectives[0]) is MultiObjective: + objective = objectives[0] optimization_config = MultiObjectiveOptimizationConfig( objective=assert_is_instance( objective, Union[MultiObjective, ScalarizedObjective] @@ -758,7 +760,7 @@ def opt_config_and_tracking_metrics_from_sqa( ) else: optimization_config = OptimizationConfig( - objective=objective, + objectives=objectives, outcome_constraints=outcome_constraints, pruning_target_parameterization=pruning_target_parameterization, ) diff --git a/ax/storage/sqa_store/encoder.py b/ax/storage/sqa_store/encoder.py index 63ab30eaa41..9023eb07d3f 100644 --- a/ax/storage/sqa_store/encoder.py +++ b/ax/storage/sqa_store/encoder.py @@ -839,13 +839,14 @@ def optimization_config_to_sqa( ), experiment_metrics=experiment_metrics, ) + metrics_sqa.append(obj_sqa) else: - obj_sqa = self.objective_to_sqa( - objective=optimization_config.objective, - experiment_metrics=experiment_metrics, - ) - - metrics_sqa.append(obj_sqa) + for obj in optimization_config.objectives: + obj_sqa = self.objective_to_sqa( + objective=obj, + experiment_metrics=experiment_metrics, + ) + metrics_sqa.append(obj_sqa) for constraint in optimization_config.outcome_constraints: constraint_sqa = self.outcome_constraint_to_sqa( outcome_constraint=constraint, diff --git a/ax/storage/sqa_store/tests/test_sqa_store.py b/ax/storage/sqa_store/tests/test_sqa_store.py index 07de5407c20..f7b98b8d0a5 100644 --- a/ax/storage/sqa_store/tests/test_sqa_store.py +++ b/ax/storage/sqa_store/tests/test_sqa_store.py @@ -159,6 +159,7 @@ get_fixed_parameter, get_generator_run, get_model_predictions_per_arm, + get_moo_optimization_config, get_multi_objective_optimization_config, get_multi_type_experiment, get_objective, @@ -1424,6 +1425,18 @@ def test_optimization_config_pruning_target_parameterization_sqa_roundtrip( ) self.assertEqual(loaded_pruning_target_parameterization.parameters["z"], False) + def test_moo_optimization_config_sqa_roundtrip(self) -> None: + """Test SQA round-trip for OptimizationConfig with multiple objectives.""" + experiment = get_experiment_with_batch_trial() + experiment.add_tracking_metric(Metric(name="m3", lower_is_better=True)) + experiment.optimization_config = get_moo_optimization_config() + 
save_experiment(experiment) + loaded_experiment = load_experiment(experiment.name) + self.assertEqual(experiment, loaded_experiment) + loaded_oc = none_throws(loaded_experiment.optimization_config) + self.assertEqual(len(loaded_oc.objectives), 2) + self.assertTrue(loaded_oc.is_moo_problem) + def test_multi_objective_optimization_config_pruning_target_sqa_roundtrip( self, ) -> None: diff --git a/ax/utils/testing/core_stubs.py b/ax/utils/testing/core_stubs.py index 7c143d45463..6c452acbd6b 100644 --- a/ax/utils/testing/core_stubs.py +++ b/ax/utils/testing/core_stubs.py @@ -676,7 +676,7 @@ def get_multi_type_experiment( add_trial_type: bool = True, add_trials: bool = False, num_arms: int = 10 ) -> MultiTypeExperiment: oc = OptimizationConfig( - Objective(metric=BraninMetric("m1", ["x1", "x2"]), minimize=True) + objective=Objective(metric=BraninMetric("m1", ["x1", "x2"]), minimize=True) ) experiment = MultiTypeExperiment( name="test_exp", @@ -2315,6 +2315,17 @@ def get_multi_objective_optimization_config( ) +def get_moo_optimization_config() -> OptimizationConfig: + """OptimizationConfig with multiple objectives via objectives= kwarg.""" + sig = {"m1": "m1", "m3": "m3"} + return OptimizationConfig( + objectives=[ + Objective(expression="m1", metric_name_to_signature=sig), + Objective(expression="-m3", metric_name_to_signature=sig), + ], + ) + + def get_optimization_config_no_constraints( minimize: bool = False, ) -> OptimizationConfig: From 7e4a99f719a5646687f8f43920ea4c9e92f48f4e Mon Sep 17 00:00:00 2001 From: Sait Cakmak Date: Tue, 7 Apr 2026 13:22:39 -0700 Subject: [PATCH 2/2] Deprecate OptimizationConfig.__init__(objective=), remove setter & clone_with_args(objective=) (#5151) Summary: Goal: We added `objectives` input, we're now migrating all usage to `objectives` and eliminating the `objective` input. This will make it easier to eliminate `MultiObjective` and `MultiObjectiveOptimizationConfig`. Phase 1 of the OptimizationConfig simplification migration: - Add DeprecationWarning when passing objective= to OptimizationConfig.__init__ - Remove the objective setter on OptimizationConfig (MOOC keeps its own) - Remove objective= param from OptimizationConfig.clone_with_args (MOOC/PreferenceOC keep theirs) - Migrate all callers of the removed setter and clone_with_args(objective=) Differential Revision: D99491494 --- ax/adapter/tests/test_torch_moo_adapter.py | 13 ++-- ax/adapter/transforms/relativize.py | 7 ++- ax/adapter/transforms/standardize_y.py | 10 ++-- .../transforms/stratified_standardize_y.py | 10 ++-- ax/core/optimization_config.py | 59 ++++++------------- ax/core/tests/test_optimization_config.py | 16 +---- ax/service/tests/test_best_point.py | 28 +++++---- ax/service/tests/test_best_point_utils.py | 31 ++++++---- ax/storage/sqa_store/tests/test_sqa_store.py | 9 +-- 9 files changed, 86 insertions(+), 97 deletions(-) diff --git a/ax/adapter/tests/test_torch_moo_adapter.py b/ax/adapter/tests/test_torch_moo_adapter.py index d929c3f15a7..7f8aebca3d5 100644 --- a/ax/adapter/tests/test_torch_moo_adapter.py +++ b/ax/adapter/tests/test_torch_moo_adapter.py @@ -333,7 +333,6 @@ def test_hypervolume(self, _, cuda: bool = False) -> None: ) for trial in exp.trials.values(): trial.mark_running(no_runner_required=True).mark_completed() - # pyre-fixme[16]: Optional type has no attribute `metrics`. metrics_dict = exp.metrics # Objective thresholds and synthetic observations chosen to have closed-form # hypervolumes to test. 
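The caller migrations in the hunks below all follow one pattern: code that previously assigned through the removed `objective` setter, or passed `clone_with_args(objective=...)`, now rebuilds the config via `clone_with_args(objectives=[...])`. A minimal before/after sketch distilled from the test changes in this stack (metric names are placeholders):

    from ax.core.objective import Objective
    from ax.core.optimization_config import OptimizationConfig

    sig = {"m1": "m1", "m2": "m2"}
    opt_config = OptimizationConfig(
        objectives=[Objective(expression="m1", metric_name_to_signature=sig)]
    )
    # Before: opt_config.objective = new_objective  (setter removed here)
    # After: a functional update; the original config is left untouched.
    opt_config = opt_config.clone_with_args(
        objectives=[Objective(expression="-m2", metric_name_to_signature=sig)]
    )
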
@@ -464,9 +463,15 @@ def test_infer_objective_thresholds(self, _, cuda: bool = False) -> None: first = sub_exprs[0] if not first.startswith("-"): sub_exprs[0] = f"-{first}" - oc.objective = Objective( - expression=", ".join(sub_exprs), - metric_name_to_signature={s.lstrip("-"): s.lstrip("-") for s in sub_exprs}, + oc = oc.clone_with_args( + objectives=[ + Objective( + expression=", ".join(sub_exprs), + metric_name_to_signature={ + s.lstrip("-"): s.lstrip("-") for s in sub_exprs + }, + ) + ] ) for use_partial_thresholds in (False, True): diff --git a/ax/adapter/transforms/relativize.py b/ax/adapter/transforms/relativize.py index d3fb5dd2376..4d81cb0369d 100644 --- a/ax/adapter/transforms/relativize.py +++ b/ax/adapter/transforms/relativize.py @@ -157,7 +157,7 @@ def transform_optimization_config( "Expected multi-objective, got single-objective" ) new_optimization_config = optimization_config.clone_with_args( - objective=objective, + objectives=[objective], outcome_constraints=constraints, ) elif isinstance(optimization_config, MultiObjectiveOptimizationConfig): @@ -174,13 +174,14 @@ def transform_optimization_config( ) new_optimization_config = optimization_config.clone_with_args( - objective=optimization_config.objective, + objectives=[optimization_config.objective], outcome_constraints=constraints, objective_thresholds=obj_thresholds, ) else: new_optimization_config = optimization_config.clone_with_args( - objective=optimization_config.objective, outcome_constraints=constraints + objectives=[optimization_config.objective], + outcome_constraints=constraints, ) return new_optimization_config diff --git a/ax/adapter/transforms/standardize_y.py b/ax/adapter/transforms/standardize_y.py index 2deaf462583..72513d44bd4 100644 --- a/ax/adapter/transforms/standardize_y.py +++ b/ax/adapter/transforms/standardize_y.py @@ -133,10 +133,12 @@ def transform_optimization_config( (name, new_w) for (name, _), new_w in zip(objective.metric_weights, new_weights) ] - optimization_config.objective = _build_objective_from_metric_weights( - new_metric_weights, - metric_name_to_signature=objective.metric_name_to_signature, - ) + optimization_config._objectives = [ + _build_objective_from_metric_weights( + new_metric_weights, + metric_name_to_signature=objective.metric_name_to_signature, + ) + ] new_constraints = self._transform_constraints( optimization_config.outcome_constraints, adapter diff --git a/ax/adapter/transforms/stratified_standardize_y.py b/ax/adapter/transforms/stratified_standardize_y.py index 9ae063c3507..15670fe7f49 100644 --- a/ax/adapter/transforms/stratified_standardize_y.py +++ b/ax/adapter/transforms/stratified_standardize_y.py @@ -196,10 +196,12 @@ def transform_optimization_config( (name, new_w) for (name, _), new_w in zip(objective.metric_weights, new_weights) ] - optimization_config.objective = _build_objective_from_metric_weights( - new_metric_weights, - metric_name_to_signature=objective.metric_name_to_signature, - ) + optimization_config._objectives = [ + _build_objective_from_metric_weights( + new_metric_weights, + metric_name_to_signature=objective.metric_name_to_signature, + ) + ] optimization_config.outcome_constraints = self._transform_constraints( optimization_config.outcome_constraints, strata, adapter diff --git a/ax/core/optimization_config.py b/ax/core/optimization_config.py index c1b1694a323..8bbb0e28470 100644 --- a/ax/core/optimization_config.py +++ b/ax/core/optimization_config.py @@ -8,6 +8,7 @@ from __future__ import annotations +import warnings from collections.abc 
import Mapping from itertools import groupby from typing import Self @@ -80,11 +81,17 @@ def __init__( consideration, and if not, the parameter value will be replaced with the corresponding value in the target arm. """ + if objective is not None: + warnings.warn( + "Passing `objective` to OptimizationConfig is deprecated. " + "Use `objectives=[objective]` instead.", + DeprecationWarning, + stacklevel=2, + ) if objective is not None and objectives is not None: raise UserInputError( "Cannot specify both `objective` and `objectives`. " - "Use `objective` for single-objective optimization or " - "`objectives` for multi-objective optimization." + "Use `objectives=[objective]` instead." ) if objective is None and objectives is None: raise UserInputError("Must specify either `objective` or `objectives`.") @@ -113,7 +120,6 @@ def clone(self) -> Self: def clone_with_args( self, *, - objective: Objective | None = None, objectives: list[Objective] | None = None, outcome_constraints: None | (list[OutcomeConstraint]) = _NO_OUTCOME_CONSTRAINTS, pruning_target_parameterization: Arm @@ -122,21 +128,12 @@ def clone_with_args( """Make a copy of this optimization config. Args: - objective: Replace with a single objective. Mutually exclusive - with ``objectives``. - objectives: Replace with a list of objectives. Mutually exclusive - with ``objective``. + objectives: Replace with a list of objectives. outcome_constraints: Replace outcome constraints. Pass ``None`` to clear them. pruning_target_parameterization: Replace the pruning target. """ - if objective is not None and objectives is not None: - raise UserInputError( - "Cannot specify both `objective` and `objectives` in clone_with_args." - ) - if objective is not None: - cloned_objectives = [objective] - elif objectives is not None: + if objectives is not None: cloned_objectives = objectives else: cloned_objectives = [obj.clone() for obj in self._objectives] @@ -182,15 +179,6 @@ def objective(self) -> Objective: ) return self._objectives[0] - @objective.setter - def objective(self, objective: Objective) -> None: - """Set objective. Only valid for single-objective configs.""" - self._validate_transformed_optimization_config( - objectives=[objective], - outcome_constraints=self.outcome_constraints, - ) - self._objectives = [objective] - @property def all_constraints(self) -> list[OutcomeConstraint]: """Get outcome constraints.""" @@ -449,7 +437,6 @@ def __init__( def clone_with_args( self, *, - objective: Objective | None = None, objectives: list[Objective] | None = None, outcome_constraints: None | (list[OutcomeConstraint]) = _NO_OUTCOME_CONSTRAINTS, objective_thresholds: None @@ -458,13 +445,7 @@ def clone_with_args( | None = _NO_PRUNING_TARGET_PARAMETERIZATION, ) -> "MultiObjectiveOptimizationConfig": """Make a copy of this optimization config.""" - if objective is not None and objectives is not None: - raise UserInputError( - "Cannot specify both `objective` and `objectives` in clone_with_args." 
- ) - if objective is not None: - cloned_objectives = [objective] - elif objectives is not None: + if objectives is not None: cloned_objectives = objectives else: cloned_objectives = [obj.clone() for obj in self._objectives] @@ -709,7 +690,6 @@ def is_bope_problem(self) -> bool: def clone_with_args( self, *, - objective: Objective | None = None, objectives: list[Objective] | None = None, preference_profile_name: str | None = None, outcome_constraints: list[OutcomeConstraint] | None = _NO_OUTCOME_CONSTRAINTS, @@ -718,16 +698,11 @@ def clone_with_args( | None = _NO_PRUNING_TARGET_PARAMETERIZATION, ) -> PreferenceOptimizationConfig: """Make a copy of this optimization config.""" - if objective is not None and objectives is not None: - raise UserInputError( - "Cannot specify both `objective` and `objectives` in clone_with_args." - ) - if objective is not None: - cloned_objectives = [objective] - elif objectives is not None: - cloned_objectives = objectives - else: - cloned_objectives = [obj.clone() for obj in self._objectives] + cloned_objectives = ( + [obj.clone() for obj in self._objectives] + if objectives is None + else objectives + ) preference_profile_name = ( self.preference_profile_name diff --git a/ax/core/tests/test_optimization_config.py b/ax/core/tests/test_optimization_config.py index c8b5c713883..d03143422f7 100644 --- a/ax/core/tests/test_optimization_config.py +++ b/ax/core/tests/test_optimization_config.py @@ -84,8 +84,6 @@ def test_Init(self) -> None: objective=self.objective, outcome_constraints=self.outcome_constraints ) self.assertEqual(str(config1), OC_STR) - with self.assertRaises(ValueError): - config1.objective = self.alt_objective # constrained Objective. # updating constraints is fine. config1.outcome_constraints = [self.outcome_constraint] self.assertEqual(len(config1.metric_names), 2) @@ -94,10 +92,6 @@ def test_Init(self) -> None: config2 = OptimizationConfig(objective=self.objective) self.assertEqual(config2.outcome_constraints, []) - # setting objective is fine too, if it's compatible with constraints.. - config2.objective = self.m2_objective - # setting constraints on objectives is fine for MultiObjective components. 
- config2.outcome_constraints = self.outcome_constraints self.assertEqual(config2.outcome_constraints, self.outcome_constraints) @@ -355,8 +349,8 @@ def test_objectives_kwarg_clone_and_repr(self) -> None: self.assertEqual(cloned.objectives[1].expression, "-m2") self.assertTrue(cloned.is_moo_problem) - # clone_with_args(objective=) replaces the list with a single objective - cloned = config.clone_with_args(objective=self.obj1) + # clone_with_args(objectives=) replaces the list with a single objective + cloned = config.clone_with_args(objectives=[self.obj1]) self.assertEqual(len(cloned.objectives), 1) self.assertFalse(cloned.is_moo_problem) @@ -366,10 +360,6 @@ def test_objectives_kwarg_clone_and_repr(self) -> None: self.assertEqual(len(cloned.objectives), 2) self.assertEqual(cloned.objectives[1].expression, "m3") - # objective= and objectives= are mutually exclusive in clone_with_args - with self.assertRaisesRegex(UserInputError, "Cannot specify both"): - config.clone_with_args(objective=self.obj1, objectives=[self.obj1]) - # repr always uses "objectives=" self.assertIn("objectives=", repr(config)) single_config = OptimizationConfig(objectives=[self.obj1]) @@ -815,7 +805,7 @@ def test_Clone(self) -> None: objectives=[self.objectives["o1"], self.objectives["o3"]] ) cloned_with_diff_objective = config.clone_with_args( - objective=different_objective + objectives=[different_objective] ) self.assertEqual( cloned_with_diff_objective.objective.expression, diff --git a/ax/service/tests/test_best_point.py b/ax/service/tests/test_best_point.py index 03393107221..78c48769aef 100644 --- a/ax/service/tests/test_best_point.py +++ b/ax/service/tests/test_best_point.py @@ -60,12 +60,14 @@ def test_get_trace(self) -> None: self.assertEqual(get_trace(exp), [11, 10, 9, 9, 5]) # Same experiment with maximize via new optimization config. - opt_conf = none_throws(exp.optimization_config).clone() - opt_conf.objective = Objective( - expression=opt_conf.objective.metric_names[0], - metric_name_to_signature={ - opt_conf.objective.metric_names[0]: opt_conf.objective.metric_names[0] - }, + metric_name = none_throws(exp.optimization_config).objective.metric_names[0] + opt_conf = none_throws(exp.optimization_config).clone_with_args( + objectives=[ + Objective( + expression=metric_name, + metric_name_to_signature={metric_name: metric_name}, + ) + ], ) self.assertEqual(get_trace(exp, opt_conf), [11, 11, 11, 15, 15]) @@ -441,12 +443,14 @@ def test_get_best_observed_value(self) -> None: ) self.assertEqual(get_best(exp), 5) # Same experiment with maximize via new optimization config. 
- opt_conf = none_throws(exp.optimization_config).clone() - opt_conf.objective = Objective( - expression=opt_conf.objective.metric_names[0], - metric_name_to_signature={ - opt_conf.objective.metric_names[0]: opt_conf.objective.metric_names[0] - }, + metric_name = none_throws(exp.optimization_config).objective.metric_names[0] + opt_conf = none_throws(exp.optimization_config).clone_with_args( + objectives=[ + Objective( + expression=metric_name, + metric_name_to_signature={metric_name: metric_name}, + ) + ], ) self.assertEqual(get_best(exp, opt_conf), 15) diff --git a/ax/service/tests/test_best_point_utils.py b/ax/service/tests/test_best_point_utils.py index dc597cd2006..c8b80851188 100644 --- a/ax/service/tests/test_best_point_utils.py +++ b/ax/service/tests/test_best_point_utils.py @@ -615,7 +615,9 @@ def test_best_raw_objective_point_scalarized(self) -> None: exp = get_branin_experiment() gs = choose_generation_strategy_legacy(search_space=exp.search_space) exp.optimization_config = OptimizationConfig( - objective=ScalarizedObjective(metrics=[get_branin_metric()], minimize=True) + objectives=[ + ScalarizedObjective(metrics=[get_branin_metric()], minimize=True) + ], ) with self.assertRaisesRegex(ValueError, "Cannot identify best "): get_best_raw_objective_point_with_trial_index(exp) @@ -637,11 +639,16 @@ def test_best_raw_objective_point_scalarized_multi(self) -> None: exp = get_branin_experiment() gs = choose_generation_strategy_legacy(search_space=exp.search_space) exp.optimization_config = OptimizationConfig( - objective=ScalarizedObjective( - metrics=[get_branin_metric(), get_branin_metric(lower_is_better=False)], - weights=[0.1, -0.9], - minimize=True, - ) + objectives=[ + ScalarizedObjective( + metrics=[ + get_branin_metric(), + get_branin_metric(lower_is_better=False), + ], + weights=[0.1, -0.9], + minimize=True, + ) + ], ) with self.assertRaisesRegex(ValueError, "Cannot identify best "): get_best_raw_objective_point_with_trial_index(experiment=exp) @@ -1037,11 +1044,13 @@ def test_best_parameters_from_model_predictions_scalarized(self) -> None: ) exp.add_tracking_metric(metric2) exp.optimization_config = OptimizationConfig( - objective=ScalarizedObjective( - metrics=[metric1, metric2], - weights=[0.5, 0.5], - minimize=True, - ) + objectives=[ + ScalarizedObjective( + metrics=[metric1, metric2], + weights=[0.5, 0.5], + minimize=True, + ) + ], ) # Run trials and generate data diff --git a/ax/storage/sqa_store/tests/test_sqa_store.py b/ax/storage/sqa_store/tests/test_sqa_store.py index f7b98b8d0a5..f310fbeba6b 100644 --- a/ax/storage/sqa_store/tests/test_sqa_store.py +++ b/ax/storage/sqa_store/tests/test_sqa_store.py @@ -1270,9 +1270,10 @@ def test_experiment_objective_updates(self) -> None: # update objective # (should perform update in place) - optimization_config = get_optimization_config() objective = get_objective(minimize=True) - optimization_config.objective = objective + optimization_config = get_optimization_config().clone_with_args( + objectives=[objective] + ) experiment.optimization_config = optimization_config save_experiment(experiment) self.assertEqual( @@ -1282,8 +1283,8 @@ def test_experiment_objective_updates(self) -> None: # replace objective # (old one should become tracking metric) experiment.add_tracking_metric(Metric(name="objective")) - optimization_config.objective = Objective( - metric=Metric(name="objective"), minimize=False + optimization_config = optimization_config.clone_with_args( + objectives=[Objective(metric=Metric(name="objective"), minimize=False)] 
) experiment.optimization_config = optimization_config save_experiment(experiment)
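
Taken together, the two diffs leave the API in the state sketched below. This is a usage sketch distilled from the tests in this stack (metric names are placeholders; `sig` stands in for a metric-name-to-signature mapping as in the test stubs):

    import warnings

    from ax.core.objective import Objective
    from ax.core.optimization_config import OptimizationConfig

    sig = {"m1": "m1", "m2": "m2"}

    # Single objective: the `objective` accessor still works.
    single = OptimizationConfig(
        objectives=[Objective(expression="m1", metric_name_to_signature=sig)]
    )
    assert not single.is_moo_problem
    assert single.objective.expression == "m1"

    # Multiple objectives: use `objectives`; accessing `objective`
    # raises UnsupportedError.
    multi = OptimizationConfig(
        objectives=[
            Objective(expression="m1", metric_name_to_signature=sig),
            Objective(expression="-m2", metric_name_to_signature=sig),
        ]
    )
    assert multi.is_moo_problem and len(multi.objectives) == 2

    # The legacy keyword still constructs a valid config, but Diff 2
    # makes it emit a DeprecationWarning.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        legacy = OptimizationConfig(
            objective=Objective(expression="m1", metric_name_to_signature=sig)
        )
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)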