From 4cd4955c9b5607354cfae7aa70cf9a7e8c1c408f Mon Sep 17 00:00:00 2001 From: Sait Cakmak Date: Tue, 7 Apr 2026 06:41:24 -0700 Subject: [PATCH 1/5] Design doc: Restrict Objective to single/scalarized & simplify OptimizationConfig Summary: Implementation plan for Option 3 from the design doc: https://docs.google.com/document/d/1EGQYmBjiNGtYapXu1RLHEBdA5Yz2c7q17acX3es0yV8/edit This restricts Objective to represent a single (possibly scalarized) objective, moves multi-objective representation to OptimizationConfig(objectives=list[Objective]), adds threshold/relative_threshold fields to Objective, and deprecates MultiObjectiveOptimizationConfig, MultiObjective, ScalarizedObjective, and ObjectiveThreshold. The plan is a 6-diff stack, each backward-compatible: 1. Add objectives: list[Objective] to OptimizationConfig 2. Update MOOC & PreferenceOptimizationConfig to use objectives list 3. Restrict Objective to single/scalarized (reject commas) 4. Migrate isinstance(_, MOOC) checks to is_moo_problem 5. Add threshold/relative_threshold to Objective + storage 6. 
Remove all internal usage of deprecated classes Differential Revision: D99386109 --- ...ict-objective-simplify-optconfig-design.md | 280 ++++++++++++++++++ 1 file changed, 280 insertions(+) create mode 100644 ax/docs/plans/2026-04-02-restrict-objective-simplify-optconfig-design.md diff --git a/ax/docs/plans/2026-04-02-restrict-objective-simplify-optconfig-design.md b/ax/docs/plans/2026-04-02-restrict-objective-simplify-optconfig-design.md new file mode 100644 index 00000000000..e27d91b0032 --- /dev/null +++ b/ax/docs/plans/2026-04-02-restrict-objective-simplify-optconfig-design.md @@ -0,0 +1,280 @@ +# Design: Restrict Objective to Single/Scalarized & Simplify OptimizationConfig + +**Date:** 2026-04-02 +**Author:** Sait Cakmak +**Design Doc:** https://docs.google.com/document/d/1EGQYmBjiNGtYapXu1RLHEBdA5Yz2c7q17acX3es0yV8/edit +**Selected Option:** Option 3 -- Restrict Objective to single, possibly scalarized objective +**Prerequisite:** D98837790 (Move metric_name_to_signature from Adapter to Objective/OutcomeConstraint) + +## Summary + +This plan implements Option 3 from the design doc: restrict `Objective` to represent +a single (possibly scalarized) objective, move multi-objective representation to +`OptimizationConfig(objectives=list[Objective])`, add `threshold`/`relative_threshold` +fields to `Objective`, and deprecate `MultiObjectiveOptimizationConfig`, `MultiObjective`, +`ScalarizedObjective`, and `ObjectiveThreshold` (keeping them as deprecated shims, +removing all internal usage). + +## Diff Stack + +Each diff is backward-compatible. Summaries should reference this design doc. + +--- + +### Diff 1: Add `objectives: list[Objective]` to `OptimizationConfig` + +**Goal:** Enable the new `OptimizationConfig(objectives=[...])` construction path +without breaking any existing code. + +**Files:** +- `ax/core/optimization_config.py` +- `ax/core/tests/test_optimization_config.py` + +**Changes:** + +1. 
`OptimizationConfig.__init__` accepts new kwarg `objectives: list[Objective] | None = None` + - Mutually exclusive with `objective` -- raise `UserInputError` if both provided + - If `objectives` is provided, store as `self._objectives: list[Objective]` + - If `objective` is provided (existing path), wrap as `self._objectives = [objective]` + - Validate: no duplicate metric names across objectives in the list + - Validate: each objective in the list must not be multi-objective (no comma expressions) + +2. `objectives` property: returns `self._objectives` + +3. `objective` property: returns `self._objectives[0]` if `len == 1`, raises + `UnsupportedError("Access individual objectives via `objectives` property for + multi-objective configs.")` if `len > 1` + +4. `is_moo_problem` property: `len(self._objectives) > 1` + (replaces current `self.objective.is_multi_objective`) + +5. `metric_names` property: aggregate across all objectives + constraints + +6. `metric_name_to_signature` property: aggregate across all objectives + constraints + +7. `metric_signatures` property: aggregate across all objectives + constraints + +8. `objective_thresholds` property: filter constraints matching any objective metric name + +9. `_validate_transformed_optimization_config`: drop the "does not support + MultiObjective" error. Add validation that objectives don't share metrics + and that objective metrics aren't constrained. + +10. `clone_with_args`: support cloning with `objectives` list + +11. Tests: add construction tests for `OptimizationConfig(objectives=[obj1, obj2])`, + test `is_moo_problem`, test `objective` raises for MOO, test metric aggregation + +--- + +### Diff 2: Update `MultiObjectiveOptimizationConfig` and `PreferenceOptimizationConfig` + +**Goal:** Make MOOC use the new `objectives` list internally. Deprecate MOOC. + +**Files:** +- `ax/core/optimization_config.py` +- `ax/core/tests/test_optimization_config.py` + +**Changes:** + +1. 
`MOOC.__init__`: + - Accept `objectives: list[Objective] | None = None` kwarg (new path) + - If legacy `objective` kwarg is used with a comma-separated multi-objective + expression, decompose into individual objectives + - If `objective_thresholds` list is provided, resolve each threshold onto + the matching Objective (store on Objective for now as a temporary attribute, + to be replaced by `threshold` field in Diff 5) + - Emit `DeprecationWarning` for the class itself + - Call `super().__init__(objectives=objectives_list, ...)` + +2. `MOOC.objective_thresholds` property: return from stored `_objective_thresholds` + OR synthesize from objectives (backward compat) + +3. `PreferenceOptimizationConfig`: + - Accept `objectives: list[Objective] | None` kwarg + - Validate `len(objectives) > 1` instead of `isinstance(objective, MultiObjective)` + - Deprecation warning for passing `objective` with multi-objective expression + +4. Tests: update MOOC and PreferenceOptimizationConfig tests + +--- + +### Diff 3: Restrict `Objective` to single/scalarized expressions + +**Goal:** Make `Objective` reject comma-separated (multi-objective) expressions. +Safe because Diffs 1-2 provide the `objectives=[...]` alternative. + +**Files:** +- `ax/core/objective.py` +- `ax/core/tests/test_objective.py` +- Any internal callers that construct comma-separated Objectives (update to use + `OptimizationConfig(objectives=[...])`) + +**Changes:** + +1. `Objective.__init__`: after parsing the expression, if it contains commas + (i.e., `parse_objective_expression` returns a tuple), raise `UserInputError` + with migration guidance to use `OptimizationConfig(objectives=[...])`. + +2. Remove `is_multi_objective` property (always False now, no longer meaningful). + Add a deprecated shim that warns and returns False. + +3. Remove `is_single_objective` property (redundant -- it's always `not is_scalarized`). + Add a deprecated shim. + +4. 
`MultiObjective.__init__`: raise `NotImplementedError` with message: + "MultiObjective is removed. Use OptimizationConfig(objectives=[...]) instead." + +5. Update all internal callers that construct `Objective(expression="acc, -loss")` + to use `OptimizationConfig(objectives=[Objective("acc"), Objective("-loss")])`. + Key files: + - `ax/core/optimization_config.py` (MOOC validation) + - `ax/core/experiment.py` + - `ax/adapter/adapter_utils.py` + - `ax/storage/json_store/decoder.py` + - `ax/storage/sqa_store/encoder.py` / `decoder.py` + +6. Tests: verify comma expressions raise, update multi-objective test construction + +--- + +### Diff 4: Migrate `isinstance(_, MOOC)` checks to `is_moo_problem` + +**Goal:** Mechanical replacement of isinstance checks. Large but safe. + +**Files:** ~24 files across adapter/, service/, benchmark/, analysis/, +generators/, storage/, early_stopping/, global_stopping/, fb/ + +**Changes:** + +Replace all `isinstance(opt_config, MultiObjectiveOptimizationConfig)` with +`opt_config.is_moo_problem`. Approximately 30 occurrences. 
+ +Key files (non-exhaustive): +- `ax/adapter/torch.py` (2 occurrences) +- `ax/adapter/adapter_utils.py` (1) +- `ax/adapter/transforms/objective_as_constraint.py` (2) +- `ax/adapter/transforms/standardize_y.py` (1) +- `ax/adapter/transforms/relativize.py` (1) +- `ax/adapter/transforms/derelativize.py` (1) +- `ax/adapter/transforms/power_transform_y.py` (1) +- `ax/adapter/transforms/stratified_standardize_y.py` (1) +- `ax/adapter/transforms/log_y.py` (1) +- `ax/service/utils/best_point.py` (2) +- `ax/benchmark/benchmark_problem.py` (3) +- `ax/benchmark/benchmark.py` (1) +- `ax/core/experiment.py` (1) +- `ax/storage/json_store/encoders.py` (1) +- `ax/storage/sqa_store/encoder.py` (1) +- `ax/global_stopping/strategies/improvement.py` (1) +- `ax/analysis/plotly/objective_p_feasible_frontier.py` (1) +- `ax/analysis/healthcheck/early_stopping_healthcheck.py` (1) +- `ax/early_stopping/dispatch.py` (1) +- `ax/fb/early_stopping/strategies/multi_objective.py` (1) + +Also migrate remaining `isinstance(_, MultiObjective)` checks (~4 in production) +to `objective.is_multi_objective` (which is now deprecated) or the new +`opt_config.is_moo_problem`. + +**Consider splitting** into sub-diffs by module if > 500 lines. + +--- + +### Diff 5: Add `threshold` and `relative_threshold` to `Objective` + +**Goal:** Co-locate objective thresholds with their objectives. + +**Files:** +- `ax/core/objective.py` +- `ax/core/optimization_config.py` +- `ax/core/tests/test_objective.py` +- `ax/core/tests/test_optimization_config.py` +- `ax/storage/json_store/encoders.py` +- `ax/storage/json_store/decoder.py` +- `ax/storage/sqa_store/encoder.py` +- `ax/storage/sqa_store/decoder.py` +- `ax/storage/sqa_store/sqa_classes.py` (if SQA columns needed) + +**Changes:** + +1. `Objective.__init__` accepts `threshold: float | None = None` and + `relative_threshold: float | None = None` + - Store as `self._threshold` and `self._relative_threshold` + - Properties with getters/setters + +2. 
`OptimizationConfig.objective_thresholds` property: synthesize + `OutcomeConstraint` objects from each objective's threshold/relative_threshold + (for downstream compat with adapter layer's `extract_objective_thresholds`) + +3. `MOOC.__init__`: when `objective_thresholds` list is provided, resolve each + `OutcomeConstraint` to the matching `Objective.threshold` (or + `relative_threshold`). Validate no conflicts. + +4. `Objective.clone()`: preserve threshold fields + +5. Storage: + - JSON: add `"threshold"` and `"relative_threshold"` to `objective_to_dict`. + Decoder: read these fields, defaulting to `None` for old data. + - SQA: add nullable columns or store in `properties` dict. + +6. Tests: construction, serialization round-trip, MOOC threshold resolution + +--- + +### Diff 6: Cleanup -- Remove internal usage of deprecated classes + +**Goal:** All internal Ax code uses the new patterns. Deprecated classes remain +as shims for external consumers. + +**Files:** Broad -- all files that import/use `MultiObjectiveOptimizationConfig`, +`MultiObjective`, `ScalarizedObjective`, `ObjectiveThreshold`. + +**Changes:** + +1. Replace all internal construction of `MOOC(...)` with + `OptimizationConfig(objectives=[...])` + +2. Replace all internal construction of `MultiObjective([...])` with + individual `Objective` instances in a list + +3. Replace all internal construction of `ObjectiveThreshold(...)` with + `Objective(..., threshold=...)` or `OutcomeConstraint(...)` + +4. Remove internal imports of deprecated classes (keep re-exports for external compat) + +5. Strengthen deprecation warnings (add removal timeline) + +6. Clean up dead code paths, unused helper functions + +**Consider splitting** into sub-diffs: core, adapter, service, storage, benchmark, +analysis, fb. + +--- + +## Key Design Decisions + +1. **`objectives` list on base `OptimizationConfig`** -- not a separate class. + Multi-objective is a property (`is_moo_problem`), not a type. + +2. 
**`objective` property raises for MOO** -- forces callers to use `objectives` + for multi-objective, preventing silent bugs from accessing only the first objective. + +3. **Thresholds on `Objective`** -- `threshold` (absolute) and `relative_threshold` + (percent change from status quo). When both are set, the more stringent one + is used after un-relativization. + +4. **Deprecated classes kept as shims** -- `MultiObjective`, `ScalarizedObjective`, + `ObjectiveThreshold`, `MultiObjectiveOptimizationConfig` remain importable but + emit deprecation warnings. Internal usage is removed. + +5. **Backward-compatible storage** -- old serialized data (without `objectives` list + or `threshold` fields) deserializes correctly via fallback paths. + +## Risks and Mitigations + +- **Large surface area:** ~70 files reference these classes. Mitigated by splitting + into 6+ focused diffs and running full test suites. +- **Storage backward compat:** Old experiments must still load. Mitigated by + keeping decoder fallback paths and testing with existing fixtures. +- **External consumers:** Meta-internal code outside ax/ may use deprecated classes. + Mitigated by keeping shims and using deprecation warnings before removal. From 2a6d0b4f4cdabe9c8febc7dbe799af938bb22266 Mon Sep 17 00:00:00 2001 From: Sait Cakmak Date: Tue, 7 Apr 2026 11:06:44 -0700 Subject: [PATCH 2/5] Add `objectives: list[Objective]` to OptimizationConfig Summary: Part of the Restrict Objective to Single/Scalarized & Simplify OptimizationConfig design (see design doc: https://docs.google.com/document/d/1EGQYmBjiNGtYapXu1RLHEBdA5Yz2c7q17acX3es0yV8/edit). This is Diff 1 of the stack: enables the new `OptimizationConfig(objectives=[...])` construction path without breaking any existing code. Changes: - `OptimizationConfig.__init__` and `clone_with_args` are now keyword-only across `OptimizationConfig`, `MOOC`, and `PreferenceOptimizationConfig`. All positional callers updated. 
- New kwarg `objectives: list[Objective] | None = None`, mutually exclusive with `objective`, on `__init__` and `clone_with_args` of all three classes (`OptimizationConfig`, `MOOC`, and `PreferenceOptimizationConfig`). - Internally stores `self._objectives: list[Objective]` (both paths). - New `objectives` property returns the list. - `objective` property raises `UnsupportedError` if `len > 1`. - `is_moo_problem` property: True when multiple objectives or legacy multi-objective expression. - `metric_names`, `metric_name_to_signature`, `metric_signatures` aggregate across all objectives + constraints. - `__repr__` always uses `objectives=`. - JSON storage: encoder uses `objectives` key; decoder has backward compat to convert old `objective` key to `objectives` list. - SQA storage: encoder iterates `objectives` to encode each one; decoder collects multiple OBJECTIVE rows and reconstructs `OptimizationConfig(objectives=...)` when `len > 1`. - Validation ensures no duplicate metrics across objectives and no multi-objective expressions in individual list elements. 
Differential Revision: D99387020 --- ax/adapter/tests/test_torch_adapter.py | 2 +- ax/core/optimization_config.py | 284 +++++++++++++----- ax/core/tests/test_multi_type_experiment.py | 2 +- ax/core/tests/test_optimization_config.py | 109 ++++++- ax/orchestration/tests/test_orchestrator.py | 4 +- ax/service/tests/test_best_point_utils.py | 6 +- ax/service/tests/test_report_utils.py | 20 +- ax/storage/json_store/decoder.py | 7 + ax/storage/json_store/encoders.py | 11 +- .../json_store/tests/test_json_store.py | 2 + ax/storage/sqa_store/decoder.py | 12 +- ax/storage/sqa_store/encoder.py | 13 +- ax/storage/sqa_store/tests/test_sqa_store.py | 13 + ax/utils/testing/core_stubs.py | 13 +- 14 files changed, 390 insertions(+), 108 deletions(-) diff --git a/ax/adapter/tests/test_torch_adapter.py b/ax/adapter/tests/test_torch_adapter.py index 63b5e55195d..eb21d5b2469 100644 --- a/ax/adapter/tests/test_torch_adapter.py +++ b/ax/adapter/tests/test_torch_adapter.py @@ -1205,7 +1205,7 @@ def test_pairwise_preference_generator(self) -> None: surrogate=surrogate, ), optimization_config=OptimizationConfig( - Objective( + objective=Objective( metric=Metric(Keys.PAIRWISE_PREFERENCE_QUERY.value), minimize=False, ) diff --git a/ax/core/optimization_config.py b/ax/core/optimization_config.py index c2289916da1..c1b1694a323 100644 --- a/ax/core/optimization_config.py +++ b/ax/core/optimization_config.py @@ -15,8 +15,9 @@ from ax.core.arm import Arm from ax.core.objective import Objective from ax.core.outcome_constraint import ComparisonOp, OutcomeConstraint -from ax.exceptions.core import UserInputError +from ax.exceptions.core import UnsupportedError, UserInputError from ax.utils.common.base import Base +from pyre_extensions import none_throws TRefPoint = list[OutcomeConstraint] @@ -39,9 +40,12 @@ class OptimizationConfig(Base): - """An optimization configuration, which comprises an objective + """An optimization configuration, which comprises one or more objectives and outcome 
constraints. + For single-objective optimization, pass a single ``objective``. + For multi-objective optimization, pass a list of ``objectives``. + There is no minimum or maximum number of outcome constraints, but an individual metric can have at most two constraints--which is how we represent metrics with both upper and lower bounds. @@ -49,14 +53,20 @@ class OptimizationConfig(Base): def __init__( self, - objective: Objective, + *, + objective: Objective | None = None, + objectives: list[Objective] | None = None, outcome_constraints: list[OutcomeConstraint] | None = None, pruning_target_parameterization: Arm | None = None, ) -> None: """Inits OptimizationConfig. Args: - objective: Metric+direction to use for the optimization. + objective: Metric+direction to use for the optimization. Mutually + exclusive with ``objectives``. + objectives: List of objectives for multi-objective optimization. + Mutually exclusive with ``objective``. Each element must be a + single or scalarized Objective (not multi-objective). outcome_constraints: Constraints on metrics. pruning_target_parameterization: Arm containing the target values for irrelevant parameters. The target values are used to prune irrelevant @@ -70,14 +80,29 @@ def __init__( consideration, and if not, the parameter value will be replaced with the corresponding value in the target arm. """ + if objective is not None and objectives is not None: + raise UserInputError( + "Cannot specify both `objective` and `objectives`. " + "Use `objective` for single-objective optimization or " + "`objectives` for multi-objective optimization." 
+ ) + if objective is None and objectives is None: + raise UserInputError("Must specify either `objective` or `objectives`.") + + if objectives is not None: + if len(objectives) == 0: + raise UserInputError("`objectives` must not be empty.") + self._objectives: list[Objective] = objectives + else: + self._objectives = [none_throws(objective)] + constraints: list[OutcomeConstraint] = ( [] if outcome_constraints is None else outcome_constraints ) self._validate_transformed_optimization_config( - objective=objective, + objectives=self._objectives, outcome_constraints=constraints, ) - self._objective: Objective = objective self._outcome_constraints: list[OutcomeConstraint] = constraints self.pruning_target_parameterization = pruning_target_parameterization @@ -87,13 +112,34 @@ def clone(self) -> Self: def clone_with_args( self, + *, objective: Objective | None = None, + objectives: list[Objective] | None = None, outcome_constraints: None | (list[OutcomeConstraint]) = _NO_OUTCOME_CONSTRAINTS, pruning_target_parameterization: Arm | None = _NO_PRUNING_TARGET_PARAMETERIZATION, ) -> Self: - """Make a copy of this optimization config.""" - objective = self.objective.clone() if objective is None else objective + """Make a copy of this optimization config. + + Args: + objective: Replace with a single objective. Mutually exclusive + with ``objectives``. + objectives: Replace with a list of objectives. Mutually exclusive + with ``objective``. + outcome_constraints: Replace outcome constraints. Pass ``None`` + to clear them. + pruning_target_parameterization: Replace the pruning target. + """ + if objective is not None and objectives is not None: + raise UserInputError( + "Cannot specify both `objective` and `objectives` in clone_with_args." 
+ ) + if objective is not None: + cloned_objectives = [objective] + elif objectives is not None: + cloned_objectives = objectives + else: + cloned_objectives = [obj.clone() for obj in self._objectives] outcome_constraints = ( [constraint.clone() for constraint in self.outcome_constraints] if outcome_constraints is _NO_OUTCOME_CONSTRAINTS @@ -106,23 +152,44 @@ def clone_with_args( ) return self.__class__( - objective=objective, + objectives=cloned_objectives, outcome_constraints=outcome_constraints, pruning_target_parameterization=pruning_target_parameterization, ) + @property + def objectives(self) -> list[Objective]: + """Get the list of objectives. + + For single-objective optimization, this returns a single-element list. + For multi-objective optimization, this returns all objectives. + """ + return self._objectives + @property def objective(self) -> Objective: - """Get objective.""" - return self._objective + """Get the single objective. + + For single-objective or scalarized-objective configs, returns the + objective. For multi-objective configs (multiple objectives in the + list), raises ``UserInputError`` -- use ``objectives`` instead. + """ + if len(self._objectives) > 1: + raise UnsupportedError( + "This OptimizationConfig has multiple objectives. " + "Use `objectives` to access the list of objectives, or " + "iterate over individual objectives." + ) + return self._objectives[0] @objective.setter def objective(self, objective: Objective) -> None: - """Set objective if not present in outcome constraints.""" + """Set objective. 
Only valid for single-objective configs.""" self._validate_transformed_optimization_config( - objective, self.outcome_constraints + objectives=[objective], + outcome_constraints=self.outcome_constraints, ) - self._objective = objective + self._objectives = [objective] @property def all_constraints(self) -> list[OutcomeConstraint]: @@ -136,17 +203,26 @@ def outcome_constraints(self) -> list[OutcomeConstraint]: @property def objective_thresholds(self) -> list[OutcomeConstraint]: - """Get objective thresholds.""" + """Get objective thresholds. + + Returns outcome constraints whose primary metric is an objective + metric. + """ + all_obj_metric_names: set[str] = set() + for obj in self._objectives: + all_obj_metric_names.update(obj.metric_names) return [ threshold for threshold in self.outcome_constraints - if threshold.metric_names[0] in self.objective.metric_names + if threshold.metric_names[0] in all_obj_metric_names ] @property def metric_names(self) -> set[str]: - """All metric names referenced by the objective and constraints.""" - names: set[str] = set(self.objective.metric_names) + """All metric names referenced by the objectives and constraints.""" + names: set[str] = set() + for obj in self._objectives: + names.update(obj.metric_names) for oc in self.all_constraints: names.update(oc.metric_names) return names @@ -154,10 +230,11 @@ def metric_names(self) -> set[str]: @property def metric_name_to_signature(self) -> dict[str, str]: """Aggregated mapping from all metric names to their canonical - signatures, across the objective and all constraints. + signatures, across all objectives and all constraints. 
""" mapping: dict[str, str] = {} - mapping.update(self.objective.metric_name_to_signature) + for obj in self._objectives: + mapping.update(obj.metric_name_to_signature) for constraint in self.all_constraints: mapping.update(constraint.metric_name_to_signature) return mapping @@ -165,22 +242,30 @@ def metric_name_to_signature(self) -> dict[str, str]: def update_metric_name_to_signature_mapping( self, mapping: Mapping[str, str] ) -> None: - """Set the metric name to signature mapping on the objective and all + """Set the metric name to signature mapping on all objectives and constraints. """ - self.objective.update_metric_name_to_signature_mapping(mapping) + for obj in self._objectives: + obj.update_metric_name_to_signature_mapping(mapping) for constraint in self.all_constraints: constraint.update_metric_name_to_signature_mapping(mapping) @property def metric_signatures(self) -> set[str]: - """All metric signatures referenced by the objective and constraints.""" + """All metric signatures referenced by the objectives and constraints.""" mapping = self.metric_name_to_signature return {mapping[name] for name in self.metric_names} @property def is_moo_problem(self) -> bool: - return self.objective is not None and self.objective.is_multi_objective + """Whether this is a multi-objective optimization problem. + + True when there are multiple objectives in the list, or when a single + objective is a (legacy) multi-objective expression. 
+ """ + if len(self._objectives) > 1: + return True + return self._objectives[0].is_multi_objective @property def is_bope_problem(self) -> bool: @@ -195,37 +280,53 @@ def is_bope_problem(self) -> bool: @outcome_constraints.setter def outcome_constraints(self, outcome_constraints: list[OutcomeConstraint]) -> None: """Set outcome constraints if valid, else raise.""" - self._validate_transformed_optimization_config( - objective=self.objective, + unconstrainable: list[str] = [] + for obj in self._objectives: + unconstrainable.extend(obj.get_unconstrainable_metric_names()) + self._validate_outcome_constraints( + unconstrainable_metric_names=unconstrainable, outcome_constraints=outcome_constraints, ) self._outcome_constraints = outcome_constraints @staticmethod def _validate_transformed_optimization_config( - objective: Objective, + objectives: list[Objective], outcome_constraints: list[OutcomeConstraint] | None = None, ) -> None: - """Ensure outcome constraints are valid. + """Validate objectives and outcome constraints. - Either one or two outcome constraints can reference one metric. - If there are two constraints, they must have different 'ops': one - LEQ and one GEQ. - If there are two constraints, the bound of the GEQ op must be less - than the bound of the LEQ op. + Ensures no multi-objective expressions in individual objectives, + no duplicate metrics across objectives, outcome constraints don't + constrain objective metrics, and that constraint pairs on the + same metric are valid. + + Subclasses (e.g. ``MultiObjectiveOptimizationConfig``) override + this to allow multi-objective expressions. Args: - objective: Metric+direction to use for the optimization. + objectives: List of objectives to validate. outcome_constraints: Constraints to validate. """ - if objective.is_multi_objective: - # Raise error on multi-objective; `ScalarizedObjective` is OK - raise ValueError( - "OptimizationConfig does not support MultiObjective. 
" - "Use MultiObjectiveOptimizationConfig instead." - ) + all_metric_names: list[str] = [] + for obj in objectives: + if obj.is_multi_objective: + raise ValueError( + "Each objective in `objectives` must be a single or " + "scalarized objective, not a multi-objective. " + "Pass each sub-objective as a separate list element." + ) + for name in obj.metric_names: + if name in all_metric_names: + raise UserInputError( + f"Metric '{name}' appears in multiple objectives. " + "Each metric can only appear in one objective." + ) + all_metric_names.append(name) outcome_constraints = outcome_constraints or [] - unconstrainable_metric_names = objective.get_unconstrainable_metric_names() + unconstrainable_metric_names: list[str] = [] + for obj in objectives: + unconstrainable_metric_names.extend(obj.get_unconstrainable_metric_names()) OptimizationConfig._validate_outcome_constraints( unconstrainable_metric_names=unconstrainable_metric_names, outcome_constraints=outcome_constraints, @@ -274,7 +375,7 @@ def constraint_key(oc: OutcomeConstraint) -> str: def __repr__(self) -> str: return ( f"{self.__class__.__name__}(" - "objective=" + repr(self.objective) + ", " + "objectives=" + repr(self._objectives) + ", " "outcome_constraints=" + repr(self.outcome_constraints) + ")" ) @@ -299,7 +400,9 @@ class MultiObjectiveOptimizationConfig(OptimizationConfig): def __init__( self, - objective: Objective, + *, + objective: Objective | None = None, + objectives: list[Objective] | None = None, outcome_constraints: list[OutcomeConstraint] | None = None, objective_thresholds: list[OutcomeConstraint] | None = None, pruning_target_parameterization: Arm | None = None, @@ -308,9 +411,12 @@ def __init__( Args: objective: Metric+direction to use for the optimization. Should be either a - MultiObjective or a ScalarizedObjective. + MultiObjective or a ScalarizedObjective. Mutually exclusive with + ``objectives``. + objectives: List containing the objective. Mutually exclusive + with ``objective``. 
outcome_constraints: Constraints on metrics. - objective_thesholds: Thresholds objectives must exceed. Used for + objective_thresholds: Thresholds objectives must exceed. Used for multi-objective optimization and for calculating frontiers and hypervolumes. pruning_target_parameterization: Arm containing the target values for @@ -325,24 +431,26 @@ def __init__( consideration, and if not, the parameter value will be replaced with the corresponding value in the target arm. """ - constraints: list[OutcomeConstraint] = ( - [] if outcome_constraints is None else outcome_constraints + super().__init__( + objective=objective, + objectives=objectives, + outcome_constraints=outcome_constraints, + pruning_target_parameterization=pruning_target_parameterization, ) - objective_thresholds = objective_thresholds or [] + # Validate and set objective thresholds (MOOC-specific). + self._objective_thresholds: list[OutcomeConstraint] = objective_thresholds or [] self._validate_transformed_optimization_config( - objective=objective, - outcome_constraints=constraints, - objective_thresholds=objective_thresholds, + objectives=self._objectives, + outcome_constraints=self._outcome_constraints, + objective_thresholds=self._objective_thresholds, ) - self._objective: Objective = objective - self._outcome_constraints: list[OutcomeConstraint] = constraints - self._objective_thresholds: list[OutcomeConstraint] = objective_thresholds - self.pruning_target_parameterization = pruning_target_parameterization # pyre-fixme[14]: Inconsistent override. 
def clone_with_args( self, + *, objective: Objective | None = None, + objectives: list[Objective] | None = None, outcome_constraints: None | (list[OutcomeConstraint]) = _NO_OUTCOME_CONSTRAINTS, objective_thresholds: None | (list[OutcomeConstraint]) = _NO_OBJECTIVE_THRESHOLDS, @@ -350,7 +458,16 @@ def clone_with_args( | None = _NO_PRUNING_TARGET_PARAMETERIZATION, ) -> "MultiObjectiveOptimizationConfig": """Make a copy of this optimization config.""" - objective = self.objective.clone() if objective is None else objective + if objective is not None and objectives is not None: + raise UserInputError( + "Cannot specify both `objective` and `objectives` in clone_with_args." + ) + if objective is not None: + cloned_objectives = [objective] + elif objectives is not None: + cloned_objectives = objectives + else: + cloned_objectives = [obj.clone() for obj in self._objectives] outcome_constraints = ( [constraint.clone() for constraint in self.outcome_constraints] if outcome_constraints is _NO_OUTCOME_CONSTRAINTS @@ -367,7 +484,7 @@ def clone_with_args( else pruning_target_parameterization ) return MultiObjectiveOptimizationConfig( - objective=objective, + objectives=cloned_objectives, outcome_constraints=outcome_constraints, objective_thresholds=objective_thresholds, pruning_target_parameterization=pruning_target_parameterization, @@ -375,18 +492,18 @@ def clone_with_args( @property def objective(self) -> Objective: - """Get objective.""" - return self._objective + """Get the (multi-)objective.""" + return self._objectives[0] @objective.setter def objective(self, objective: Objective) -> None: """Set objective if not present in outcome constraints.""" self._validate_transformed_optimization_config( - objective=objective, + objectives=[objective], outcome_constraints=self.outcome_constraints, objective_thresholds=self.objective_thresholds, ) - self._objective = objective + self._objectives = [objective] @property def all_constraints(self) -> list[OutcomeConstraint]: @@ -404,7 
+521,7 @@ def objective_thresholds( ) -> None: """Set outcome constraints if valid, else raise.""" self._validate_transformed_optimization_config( - objective=self.objective, + objectives=self._objectives, objective_thresholds=objective_thresholds, ) self._objective_thresholds = objective_thresholds @@ -418,7 +535,7 @@ def objective_thresholds_dict(self) -> dict[str, OutcomeConstraint]: @staticmethod def _validate_transformed_optimization_config( - objective: Objective, + objectives: list[Objective], outcome_constraints: list[OutcomeConstraint] | None = None, objective_thresholds: list[OutcomeConstraint] | None = None, ) -> None: @@ -431,10 +548,11 @@ def _validate_transformed_optimization_config( than the bound of the LEQ op. Args: - objective: Metric+direction to use for the optimization. + objectives: List of objectives to validate. outcome_constraints: Constraints to validate. objective_thresholds: Thresholds objectives must exceed. """ + objective = objectives[0] if not (objective.is_multi_objective or objective.is_scalarized_objective): raise TypeError( "`MultiObjectiveOptimizationConfig` requires an objective " @@ -514,7 +632,9 @@ def check_objective_thresholds_match_objectives( class PreferenceOptimizationConfig(MultiObjectiveOptimizationConfig): def __init__( self, - objective: Objective, + *, + objective: Objective | None = None, + objectives: list[Objective] | None = None, preference_profile_name: str, outcome_constraints: list[OutcomeConstraint] | None = None, expect_relativized_outcomes: bool = False, @@ -524,7 +644,9 @@ def __init__( Args: objective: Metric+direction to use for the optimization. Should be a - MultiObjective. + MultiObjective. Mutually exclusive with ``objectives``. + objectives: List containing the objective. Mutually exclusive + with ``objective``. preference_profile_name: The name of the auxiliary experiment to use as the preference profile for the experiment. 
An auxiliary experiment with this name and purpose PE_EXPERIMENT should be attached to @@ -548,12 +670,6 @@ def __init__( consideration, and if not, the parameter value will be replaced with the corresponding value in the target arm. """ - if not objective.is_multi_objective: - raise TypeError( - "`PreferenceOptimizationConfig` requires a multi-objective. " - "Use `OptimizationConfig` instead if using a " - "single-metric objective." - ) if outcome_constraints: raise NotImplementedError( "Outcome constraints are not yet supported in " @@ -563,10 +679,19 @@ def __init__( # Call parent's __init__ with objective_thresholds=None super().__init__( objective=objective, + objectives=objectives, outcome_constraints=outcome_constraints, objective_thresholds=None, pruning_target_parameterization=pruning_target_parameterization, ) + # Validate that the objective is multi-objective (after super sets + # self._objectives). + if not self._objectives[0].is_multi_objective: + raise TypeError( + "`PreferenceOptimizationConfig` requires a multi-objective. " + "Use `OptimizationConfig` instead if using a " + "single-metric objective." + ) self.preference_profile_name = preference_profile_name self.expect_relativized_outcomes = expect_relativized_outcomes @@ -583,7 +708,9 @@ def is_bope_problem(self) -> bool: # pyre-ignore[14]: Inconsistent override. 
def clone_with_args( self, + *, objective: Objective | None = None, + objectives: list[Objective] | None = None, preference_profile_name: str | None = None, outcome_constraints: list[OutcomeConstraint] | None = _NO_OUTCOME_CONSTRAINTS, expect_relativized_outcomes: bool | None = None, @@ -591,7 +718,16 @@ def clone_with_args( | None = _NO_PRUNING_TARGET_PARAMETERIZATION, ) -> PreferenceOptimizationConfig: """Make a copy of this optimization config.""" - objective = self.objective.clone() if objective is None else objective + if objective is not None and objectives is not None: + raise UserInputError( + "Cannot specify both `objective` and `objectives` in clone_with_args." + ) + if objective is not None: + cloned_objectives = [objective] + elif objectives is not None: + cloned_objectives = objectives + else: + cloned_objectives = [obj.clone() for obj in self._objectives] preference_profile_name = ( self.preference_profile_name @@ -615,7 +751,7 @@ def clone_with_args( ) return PreferenceOptimizationConfig( - objective=objective, + objectives=cloned_objectives, preference_profile_name=preference_profile_name, outcome_constraints=outcome_constraints, expect_relativized_outcomes=expect_relativized_outcomes, diff --git a/ax/core/tests/test_multi_type_experiment.py b/ax/core/tests/test_multi_type_experiment.py index b314cfd5b75..7775cea3f5b 100644 --- a/ax/core/tests/test_multi_type_experiment.py +++ b/ax/core/tests/test_multi_type_experiment.py @@ -154,7 +154,7 @@ def test_setting_opt_config(self) -> None: m3 = BraninMetric("m3", ["x1", "x2"]) self.experiment.add_tracking_metric(m3) self.experiment.optimization_config = OptimizationConfig( - Objective(metric=m3, minimize=True) + objective=Objective(metric=m3, minimize=True) ) self.assertDictEqual( self.experiment._metric_to_trial_type, diff --git a/ax/core/tests/test_optimization_config.py b/ax/core/tests/test_optimization_config.py index 29f253998d0..c8b5c713883 100644 --- a/ax/core/tests/test_optimization_config.py +++ 
b/ax/core/tests/test_optimization_config.py @@ -21,14 +21,14 @@ ScalarizedOutcomeConstraint, ) from ax.core.types import ComparisonOp -from ax.exceptions.core import UserInputError +from ax.exceptions.core import UnsupportedError, UserInputError from ax.utils.common.testutils import TestCase from pyre_extensions import assert_is_instance OC_STR = ( "OptimizationConfig(" - 'objective=Objective(expression="m1"), ' + 'objectives=[Objective(expression="m1")], ' "outcome_constraints=[OutcomeConstraint(m3 >= -0.25), " "OutcomeConstraint(m4 <= 0.25), " "ScalarizedOutcomeConstraint(0.5*m3 + 0.5*m4 >= 0.9975 * baseline)])" @@ -271,6 +271,111 @@ def test_CloneWithArgs(self) -> None: ) +class OptimizationConfigObjectivesListTest(TestCase): + """Tests for the new `OptimizationConfig(objectives=[...])` construction path.""" + + def setUp(self) -> None: + super().setUp() + self.metrics = { + "m1": Metric(name="m1"), + "m2": Metric(name="m2"), + "m3": Metric(name="m3"), + } + self.sig = {m: m for m in self.metrics} + self.obj1 = Objective(expression="m1", metric_name_to_signature=self.sig) + self.obj2 = Objective(expression="-m2", metric_name_to_signature=self.sig) + self.scalarized_obj = Objective( + expression="2*m1 + m2", metric_name_to_signature=self.sig + ) + + def test_objectives_kwarg_construction(self) -> None: + """Test single and multi-objective construction via objectives kwarg.""" + # Single objective + config = OptimizationConfig(objectives=[self.obj1]) + self.assertEqual(config.objectives, [self.obj1]) + self.assertEqual(config.objective, self.obj1) + self.assertFalse(config.is_moo_problem) + + # Multi-objective + config = OptimizationConfig(objectives=[self.obj1, self.obj2]) + self.assertEqual(config.objectives, [self.obj1, self.obj2]) + self.assertTrue(config.is_moo_problem) + with self.assertRaisesRegex(UnsupportedError, "multiple objectives"): + config.objective + + def test_objectives_kwarg_metric_aggregation(self) -> None: + """Test metric_names, 
metric_name_to_signature, metric_signatures.""" + constraint = OutcomeConstraint( + expression="m3 >= 0.5", metric_name_to_signature=self.sig + ) + config = OptimizationConfig( + objectives=[self.obj1, self.obj2], + outcome_constraints=[constraint], + ) + self.assertEqual(config.metric_names, {"m1", "m2", "m3"}) + self.assertEqual( + config.metric_name_to_signature, {"m1": "m1", "m2": "m2", "m3": "m3"} + ) + self.assertEqual(config.metric_signatures, {"m1", "m2", "m3"}) + + def test_objectives_kwarg_validation(self) -> None: + """Test validation errors for objectives kwarg.""" + with self.subTest("mutual_exclusivity"): + with self.assertRaisesRegex(UserInputError, "Cannot specify both"): + OptimizationConfig(objective=self.obj1, objectives=[self.obj1]) + + with self.subTest("neither_specified"): + with self.assertRaisesRegex(UserInputError, "Must specify either"): + OptimizationConfig() + + with self.subTest("empty_list"): + with self.assertRaisesRegex(UserInputError, "must not be empty"): + OptimizationConfig(objectives=[]) + + with self.subTest("multi_objective_expression"): + multi_obj = Objective( + expression="m1, -m2", metric_name_to_signature=self.sig + ) + with self.assertRaisesRegex(ValueError, "single or scalarized"): + OptimizationConfig(objectives=[multi_obj]) + + with self.subTest("duplicate_metric_names"): + obj_dup = Objective(expression="m1", metric_name_to_signature=self.sig) + with self.assertRaisesRegex(UserInputError, "appears in multiple"): + OptimizationConfig(objectives=[self.obj1, obj_dup]) + + def test_objectives_kwarg_clone_and_repr(self) -> None: + """Test clone, clone_with_args, and repr for objectives-list configs.""" + config = OptimizationConfig(objectives=[self.obj1, self.obj2]) + + # clone preserves objectives + cloned = config.clone() + self.assertEqual(len(cloned.objectives), 2) + self.assertEqual(cloned.objectives[0].expression, "m1") + self.assertEqual(cloned.objectives[1].expression, "-m2") + 
self.assertTrue(cloned.is_moo_problem) + + # clone_with_args(objective=) replaces the list with a single objective + cloned = config.clone_with_args(objective=self.obj1) + self.assertEqual(len(cloned.objectives), 1) + self.assertFalse(cloned.is_moo_problem) + + # clone_with_args(objectives=) replaces the list + obj3 = Objective(expression="m3", metric_name_to_signature=self.sig) + cloned = config.clone_with_args(objectives=[self.obj1, obj3]) + self.assertEqual(len(cloned.objectives), 2) + self.assertEqual(cloned.objectives[1].expression, "m3") + + # objective= and objectives= are mutually exclusive in clone_with_args + with self.assertRaisesRegex(UserInputError, "Cannot specify both"): + config.clone_with_args(objective=self.obj1, objectives=[self.obj1]) + + # repr always uses "objectives=" + self.assertIn("objectives=", repr(config)) + single_config = OptimizationConfig(objectives=[self.obj1]) + self.assertIn("objectives=", repr(single_config)) + + class MultiObjectiveOptimizationConfigTest(TestCase): def setUp(self) -> None: super().setUp() diff --git a/ax/orchestration/tests/test_orchestrator.py b/ax/orchestration/tests/test_orchestrator.py index 023f970fca6..ab9bbcc17b8 100644 --- a/ax/orchestration/tests/test_orchestrator.py +++ b/ax/orchestration/tests/test_orchestrator.py @@ -2727,7 +2727,7 @@ def test_generate_candidates_does_not_generate_if_missing_data(self) -> None: ) self.branin_experiment.add_tracking_metric(custom_metric) self.branin_experiment.optimization_config = OptimizationConfig( - Objective( + objective=Objective( metric=CustomTestMetric( name="custom_test_metric", test_attribute="test" ), @@ -2974,7 +2974,7 @@ def setUp(self) -> None: self.branin_experiment_no_impl_runner_or_metrics = MultiTypeExperiment( search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - Objective(metric=Metric(name="branin"), minimize=True) + objective=Objective(metric=Metric(name="branin"), minimize=True) ), default_trial_type="type1", 
default_runner=None, diff --git a/ax/service/tests/test_best_point_utils.py b/ax/service/tests/test_best_point_utils.py index 4101f2fd6d9..dc597cd2006 100644 --- a/ax/service/tests/test_best_point_utils.py +++ b/ax/service/tests/test_best_point_utils.py @@ -615,7 +615,7 @@ def test_best_raw_objective_point_scalarized(self) -> None: exp = get_branin_experiment() gs = choose_generation_strategy_legacy(search_space=exp.search_space) exp.optimization_config = OptimizationConfig( - ScalarizedObjective(metrics=[get_branin_metric()], minimize=True) + objective=ScalarizedObjective(metrics=[get_branin_metric()], minimize=True) ) with self.assertRaisesRegex(ValueError, "Cannot identify best "): get_best_raw_objective_point_with_trial_index(exp) @@ -637,7 +637,7 @@ def test_best_raw_objective_point_scalarized_multi(self) -> None: exp = get_branin_experiment() gs = choose_generation_strategy_legacy(search_space=exp.search_space) exp.optimization_config = OptimizationConfig( - ScalarizedObjective( + objective=ScalarizedObjective( metrics=[get_branin_metric(), get_branin_metric(lower_is_better=False)], weights=[0.1, -0.9], minimize=True, @@ -1037,7 +1037,7 @@ def test_best_parameters_from_model_predictions_scalarized(self) -> None: ) exp.add_tracking_metric(metric2) exp.optimization_config = OptimizationConfig( - ScalarizedObjective( + objective=ScalarizedObjective( metrics=[metric1, metric2], weights=[0.5, 0.5], minimize=True, diff --git a/ax/service/tests/test_report_utils.py b/ax/service/tests/test_report_utils.py index 0733f65cd0c..12546f0bcbd 100644 --- a/ax/service/tests/test_report_utils.py +++ b/ax/service/tests/test_report_utils.py @@ -449,10 +449,12 @@ def _test_get_standard_plots_moo_relative_constraints( names = obj.metric_names # Create a new Objective rather than mutating _expression_str to # avoid stale _parsed cached_property. 
- none_throws(exp.optimization_config)._objective = Objective( - expression=f"{names[0]}, -{names[1]}", - metric_name_to_signature={n: n for n in names}, - ) + none_throws(exp.optimization_config)._objectives = [ + Objective( + expression=f"{names[0]}, -{names[1]}", + metric_name_to_signature={n: n for n in names}, + ) + ] exp.get_metric(names[0]).lower_is_better = False assert_is_instance( exp.optimization_config, MultiObjectiveOptimizationConfig @@ -494,10 +496,12 @@ def test_get_standard_plots_moo_no_objective_thresholds(self) -> None: # first objective to maximize, second to minimize obj = none_throws(exp.optimization_config).objective names = obj.metric_names - none_throws(exp.optimization_config)._objective = Objective( - expression=f"{names[0]}, -{names[1]}", - metric_name_to_signature={n: n for n in names}, - ) + none_throws(exp.optimization_config)._objectives = [ + Objective( + expression=f"{names[0]}, -{names[1]}", + metric_name_to_signature={n: n for n in names}, + ) + ] exp.trials[0].run() plots = get_standard_plots( experiment=exp, diff --git a/ax/storage/json_store/decoder.py b/ax/storage/json_store/decoder.py index 4259a071c17..85a3f7b5a62 100644 --- a/ax/storage/json_store/decoder.py +++ b/ax/storage/json_store/decoder.py @@ -351,6 +351,13 @@ def object_from_json( object_json = _sanitize_inputs_to_surrogate_spec(object_json=object_json) if isclass(_class) and issubclass(_class, OptimizationConfig): object_json.pop("risk_measure", None) # Deprecated. + # Backward compat: old JSON uses "objective", new uses "objectives". 
+ if ( + _class is OptimizationConfig + and "objective" in object_json + and "objectives" not in object_json + ): + object_json["objectives"] = [object_json.pop("objective")] return ax_class_from_json_dict( _class=_class, object_json=object_json, **vars(registry_kwargs) ) diff --git a/ax/storage/json_store/encoders.py b/ax/storage/json_store/encoders.py index d6f960ffdcc..024f04e9bcc 100644 --- a/ax/storage/json_store/encoders.py +++ b/ax/storage/json_store/encoders.py @@ -383,7 +383,7 @@ def optimization_config_to_dict( """Convert Ax optimization config to a dictionary.""" return { "__type": optimization_config.__class__.__name__, - "objective": optimization_config.objective, + "objectives": optimization_config.objectives, "outcome_constraints": optimization_config.outcome_constraints, "pruning_target_parameterization": ( optimization_config.pruning_target_parameterization @@ -782,16 +782,17 @@ def _build_opt_config_dict( will then recursively encode them via ``metric_to_dict``, capturing the full metric type. 
""" - objective_dict = _build_objective_dict( - objective=opt_config.objective, experiment_metrics=experiment_metrics - ) + objective_dicts = [ + _build_objective_dict(objective=obj, experiment_metrics=experiment_metrics) + for obj in opt_config.objectives + ] constraint_dicts = [ _build_constraint_dict(constraint=c, experiment_metrics=experiment_metrics) for c in opt_config.outcome_constraints ] result: dict[str, Any] = { "__type": opt_config.__class__.__name__, - "objective": objective_dict, + "objectives": objective_dicts, "outcome_constraints": constraint_dicts, "pruning_target_parameterization": opt_config.pruning_target_parameterization, } diff --git a/ax/storage/json_store/tests/test_json_store.py b/ax/storage/json_store/tests/test_json_store.py index 783bfce31d9..207fb838055 100644 --- a/ax/storage/json_store/tests/test_json_store.py +++ b/ax/storage/json_store/tests/test_json_store.py @@ -137,6 +137,7 @@ get_metric, get_mll_type, get_model_type, + get_moo_optimization_config, get_multi_objective, get_multi_objective_optimization_config, get_multi_type_experiment, @@ -380,6 +381,7 @@ ("Objective", get_objective), ("ObjectiveThreshold", get_objective_threshold), ("OptimizationConfig", get_optimization_config), + ("OptimizationConfig", get_moo_optimization_config), ("OrEarlyStoppingStrategy", get_or_early_stopping_strategy), ("OrderConstraint", get_order_constraint), ("OutcomeConstraint", get_outcome_constraint), diff --git a/ax/storage/sqa_store/decoder.py b/ax/storage/sqa_store/decoder.py index 9fb27215f44..d9754599675 100644 --- a/ax/storage/sqa_store/decoder.py +++ b/ax/storage/sqa_store/decoder.py @@ -640,7 +640,7 @@ def opt_config_and_tracking_metrics_from_sqa( register the full metric types (e.g. BraninMetric) rather than plain Metric placeholders. 
""" - objective = None + objectives: list[Objective] = [] objective_thresholds = [] outcome_constraints = [] tracking_metrics = [] @@ -659,7 +659,7 @@ def opt_config_and_tracking_metrics_from_sqa( result = self.metric_from_sqa(metric_sqa=metric_sqa) if isinstance(result, Objective): - objective = result + objectives.append(result) # Collect metrics from the objective if metric_sqa.intent in ( MetricIntent.MULTI_OBJECTIVE, @@ -729,7 +729,7 @@ def opt_config_and_tracking_metrics_from_sqa( tracking_metrics.append(result) all_metrics.append(raw_metric) - if objective is None: + if not objectives: return None, tracking_metrics, all_metrics if preference_objective_sqa is not None: @@ -737,6 +737,7 @@ def opt_config_and_tracking_metrics_from_sqa( raise SQADecodeError( "PreferenceOptimizationConfig cannot have objective thresholds." ) + objective = objectives[0] properties = preference_objective_sqa.properties or {} optimization_config = PreferenceOptimizationConfig( objective=assert_is_instance(objective, MultiObjective), @@ -747,7 +748,8 @@ def opt_config_and_tracking_metrics_from_sqa( outcome_constraints=outcome_constraints, pruning_target_parameterization=pruning_target_parameterization, ) - elif objective_thresholds or type(objective) is MultiObjective: + elif objective_thresholds or type(objectives[0]) is MultiObjective: + objective = objectives[0] optimization_config = MultiObjectiveOptimizationConfig( objective=assert_is_instance( objective, Union[MultiObjective, ScalarizedObjective] @@ -758,7 +760,7 @@ def opt_config_and_tracking_metrics_from_sqa( ) else: optimization_config = OptimizationConfig( - objective=objective, + objectives=objectives, outcome_constraints=outcome_constraints, pruning_target_parameterization=pruning_target_parameterization, ) diff --git a/ax/storage/sqa_store/encoder.py b/ax/storage/sqa_store/encoder.py index 63ab30eaa41..9023eb07d3f 100644 --- a/ax/storage/sqa_store/encoder.py +++ b/ax/storage/sqa_store/encoder.py @@ -839,13 +839,14 @@ def 
optimization_config_to_sqa( ), experiment_metrics=experiment_metrics, ) + metrics_sqa.append(obj_sqa) else: - obj_sqa = self.objective_to_sqa( - objective=optimization_config.objective, - experiment_metrics=experiment_metrics, - ) - - metrics_sqa.append(obj_sqa) + for obj in optimization_config.objectives: + obj_sqa = self.objective_to_sqa( + objective=obj, + experiment_metrics=experiment_metrics, + ) + metrics_sqa.append(obj_sqa) for constraint in optimization_config.outcome_constraints: constraint_sqa = self.outcome_constraint_to_sqa( outcome_constraint=constraint, diff --git a/ax/storage/sqa_store/tests/test_sqa_store.py b/ax/storage/sqa_store/tests/test_sqa_store.py index 07de5407c20..f7b98b8d0a5 100644 --- a/ax/storage/sqa_store/tests/test_sqa_store.py +++ b/ax/storage/sqa_store/tests/test_sqa_store.py @@ -159,6 +159,7 @@ get_fixed_parameter, get_generator_run, get_model_predictions_per_arm, + get_moo_optimization_config, get_multi_objective_optimization_config, get_multi_type_experiment, get_objective, @@ -1424,6 +1425,18 @@ def test_optimization_config_pruning_target_parameterization_sqa_roundtrip( ) self.assertEqual(loaded_pruning_target_parameterization.parameters["z"], False) + def test_moo_optimization_config_sqa_roundtrip(self) -> None: + """Test SQA round-trip for OptimizationConfig with multiple objectives.""" + experiment = get_experiment_with_batch_trial() + experiment.add_tracking_metric(Metric(name="m3", lower_is_better=True)) + experiment.optimization_config = get_moo_optimization_config() + save_experiment(experiment) + loaded_experiment = load_experiment(experiment.name) + self.assertEqual(experiment, loaded_experiment) + loaded_oc = none_throws(loaded_experiment.optimization_config) + self.assertEqual(len(loaded_oc.objectives), 2) + self.assertTrue(loaded_oc.is_moo_problem) + def test_multi_objective_optimization_config_pruning_target_sqa_roundtrip( self, ) -> None: diff --git a/ax/utils/testing/core_stubs.py b/ax/utils/testing/core_stubs.py 
index 7c143d45463..6c452acbd6b 100644 --- a/ax/utils/testing/core_stubs.py +++ b/ax/utils/testing/core_stubs.py @@ -676,7 +676,7 @@ def get_multi_type_experiment( add_trial_type: bool = True, add_trials: bool = False, num_arms: int = 10 ) -> MultiTypeExperiment: oc = OptimizationConfig( - Objective(metric=BraninMetric("m1", ["x1", "x2"]), minimize=True) + objective=Objective(metric=BraninMetric("m1", ["x1", "x2"]), minimize=True) ) experiment = MultiTypeExperiment( name="test_exp", @@ -2315,6 +2315,17 @@ def get_multi_objective_optimization_config( ) +def get_moo_optimization_config() -> OptimizationConfig: + """OptimizationConfig with multiple objectives via objectives= kwarg.""" + sig = {"m1": "m1", "m3": "m3"} + return OptimizationConfig( + objectives=[ + Objective(expression="m1", metric_name_to_signature=sig), + Objective(expression="-m3", metric_name_to_signature=sig), + ], + ) + + def get_optimization_config_no_constraints( minimize: bool = False, ) -> OptimizationConfig: From 55dd5535c1e8234409c1b47d23d3bc189c1d7fde Mon Sep 17 00:00:00 2001 From: Sait Cakmak Date: Tue, 7 Apr 2026 11:16:07 -0700 Subject: [PATCH 3/5] Deprecate OptimizationConfig.__init__(objective=), remove setter & clone_with_args(objective=) Summary: Goal: We added `objectives` input, we're now migrating all usage to `objectives` and eliminating the `objective` input. This will make it easier to eliminate `MultiObjective` and `MultiObjectiveOptimizationConfig`. 
Phase 1 of the OptimizationConfig simplification migration: - Add DeprecationWarning when passing objective= to OptimizationConfig.__init__ - Remove the objective setter on OptimizationConfig (MOOC keeps its own) - Remove objective= param from OptimizationConfig.clone_with_args (MOOC/PreferenceOC keep theirs) - Migrate all callers of the removed setter and clone_with_args(objective=) Differential Revision: D99491494 --- ax/adapter/tests/test_torch_moo_adapter.py | 13 ++-- ax/adapter/transforms/relativize.py | 7 ++- ax/adapter/transforms/standardize_y.py | 10 ++-- .../transforms/stratified_standardize_y.py | 10 ++-- ax/core/optimization_config.py | 59 ++++++------------- ax/core/tests/test_optimization_config.py | 16 +---- ax/service/tests/test_best_point.py | 28 +++++---- ax/service/tests/test_best_point_utils.py | 31 ++++++---- ax/storage/sqa_store/tests/test_sqa_store.py | 9 +-- 9 files changed, 86 insertions(+), 97 deletions(-) diff --git a/ax/adapter/tests/test_torch_moo_adapter.py b/ax/adapter/tests/test_torch_moo_adapter.py index d929c3f15a7..7f8aebca3d5 100644 --- a/ax/adapter/tests/test_torch_moo_adapter.py +++ b/ax/adapter/tests/test_torch_moo_adapter.py @@ -333,7 +333,6 @@ def test_hypervolume(self, _, cuda: bool = False) -> None: ) for trial in exp.trials.values(): trial.mark_running(no_runner_required=True).mark_completed() - # pyre-fixme[16]: Optional type has no attribute `metrics`. metrics_dict = exp.metrics # Objective thresholds and synthetic observations chosen to have closed-form # hypervolumes to test. 
@@ -464,9 +463,15 @@ def test_infer_objective_thresholds(self, _, cuda: bool = False) -> None: first = sub_exprs[0] if not first.startswith("-"): sub_exprs[0] = f"-{first}" - oc.objective = Objective( - expression=", ".join(sub_exprs), - metric_name_to_signature={s.lstrip("-"): s.lstrip("-") for s in sub_exprs}, + oc = oc.clone_with_args( + objectives=[ + Objective( + expression=", ".join(sub_exprs), + metric_name_to_signature={ + s.lstrip("-"): s.lstrip("-") for s in sub_exprs + }, + ) + ] ) for use_partial_thresholds in (False, True): diff --git a/ax/adapter/transforms/relativize.py b/ax/adapter/transforms/relativize.py index d3fb5dd2376..4d81cb0369d 100644 --- a/ax/adapter/transforms/relativize.py +++ b/ax/adapter/transforms/relativize.py @@ -157,7 +157,7 @@ def transform_optimization_config( "Expected multi-objective, got single-objective" ) new_optimization_config = optimization_config.clone_with_args( - objective=objective, + objectives=[objective], outcome_constraints=constraints, ) elif isinstance(optimization_config, MultiObjectiveOptimizationConfig): @@ -174,13 +174,14 @@ def transform_optimization_config( ) new_optimization_config = optimization_config.clone_with_args( - objective=optimization_config.objective, + objectives=[optimization_config.objective], outcome_constraints=constraints, objective_thresholds=obj_thresholds, ) else: new_optimization_config = optimization_config.clone_with_args( - objective=optimization_config.objective, outcome_constraints=constraints + objectives=[optimization_config.objective], + outcome_constraints=constraints, ) return new_optimization_config diff --git a/ax/adapter/transforms/standardize_y.py b/ax/adapter/transforms/standardize_y.py index 2deaf462583..72513d44bd4 100644 --- a/ax/adapter/transforms/standardize_y.py +++ b/ax/adapter/transforms/standardize_y.py @@ -133,10 +133,12 @@ def transform_optimization_config( (name, new_w) for (name, _), new_w in zip(objective.metric_weights, new_weights) ] - 
optimization_config.objective = _build_objective_from_metric_weights( - new_metric_weights, - metric_name_to_signature=objective.metric_name_to_signature, - ) + optimization_config._objectives = [ + _build_objective_from_metric_weights( + new_metric_weights, + metric_name_to_signature=objective.metric_name_to_signature, + ) + ] new_constraints = self._transform_constraints( optimization_config.outcome_constraints, adapter diff --git a/ax/adapter/transforms/stratified_standardize_y.py b/ax/adapter/transforms/stratified_standardize_y.py index 9ae063c3507..15670fe7f49 100644 --- a/ax/adapter/transforms/stratified_standardize_y.py +++ b/ax/adapter/transforms/stratified_standardize_y.py @@ -196,10 +196,12 @@ def transform_optimization_config( (name, new_w) for (name, _), new_w in zip(objective.metric_weights, new_weights) ] - optimization_config.objective = _build_objective_from_metric_weights( - new_metric_weights, - metric_name_to_signature=objective.metric_name_to_signature, - ) + optimization_config._objectives = [ + _build_objective_from_metric_weights( + new_metric_weights, + metric_name_to_signature=objective.metric_name_to_signature, + ) + ] optimization_config.outcome_constraints = self._transform_constraints( optimization_config.outcome_constraints, strata, adapter diff --git a/ax/core/optimization_config.py b/ax/core/optimization_config.py index c1b1694a323..8bbb0e28470 100644 --- a/ax/core/optimization_config.py +++ b/ax/core/optimization_config.py @@ -8,6 +8,7 @@ from __future__ import annotations +import warnings from collections.abc import Mapping from itertools import groupby from typing import Self @@ -80,11 +81,17 @@ def __init__( consideration, and if not, the parameter value will be replaced with the corresponding value in the target arm. """ + if objective is not None: + warnings.warn( + "Passing `objective` to OptimizationConfig is deprecated. 
" + "Use `objectives=[objective]` instead.", + DeprecationWarning, + stacklevel=2, + ) if objective is not None and objectives is not None: raise UserInputError( "Cannot specify both `objective` and `objectives`. " - "Use `objective` for single-objective optimization or " - "`objectives` for multi-objective optimization." + "Use `objectives=[objective]` instead." ) if objective is None and objectives is None: raise UserInputError("Must specify either `objective` or `objectives`.") @@ -113,7 +120,6 @@ def clone(self) -> Self: def clone_with_args( self, *, - objective: Objective | None = None, objectives: list[Objective] | None = None, outcome_constraints: None | (list[OutcomeConstraint]) = _NO_OUTCOME_CONSTRAINTS, pruning_target_parameterization: Arm @@ -122,21 +128,12 @@ def clone_with_args( """Make a copy of this optimization config. Args: - objective: Replace with a single objective. Mutually exclusive - with ``objectives``. - objectives: Replace with a list of objectives. Mutually exclusive - with ``objective``. + objectives: Replace with a list of objectives. outcome_constraints: Replace outcome constraints. Pass ``None`` to clear them. pruning_target_parameterization: Replace the pruning target. """ - if objective is not None and objectives is not None: - raise UserInputError( - "Cannot specify both `objective` and `objectives` in clone_with_args." - ) - if objective is not None: - cloned_objectives = [objective] - elif objectives is not None: + if objectives is not None: cloned_objectives = objectives else: cloned_objectives = [obj.clone() for obj in self._objectives] @@ -182,15 +179,6 @@ def objective(self) -> Objective: ) return self._objectives[0] - @objective.setter - def objective(self, objective: Objective) -> None: - """Set objective. 
Only valid for single-objective configs.""" - self._validate_transformed_optimization_config( - objectives=[objective], - outcome_constraints=self.outcome_constraints, - ) - self._objectives = [objective] - @property def all_constraints(self) -> list[OutcomeConstraint]: """Get outcome constraints.""" @@ -449,7 +437,6 @@ def __init__( def clone_with_args( self, *, - objective: Objective | None = None, objectives: list[Objective] | None = None, outcome_constraints: None | (list[OutcomeConstraint]) = _NO_OUTCOME_CONSTRAINTS, objective_thresholds: None @@ -458,13 +445,7 @@ def clone_with_args( | None = _NO_PRUNING_TARGET_PARAMETERIZATION, ) -> "MultiObjectiveOptimizationConfig": """Make a copy of this optimization config.""" - if objective is not None and objectives is not None: - raise UserInputError( - "Cannot specify both `objective` and `objectives` in clone_with_args." - ) - if objective is not None: - cloned_objectives = [objective] - elif objectives is not None: + if objectives is not None: cloned_objectives = objectives else: cloned_objectives = [obj.clone() for obj in self._objectives] @@ -709,7 +690,6 @@ def is_bope_problem(self) -> bool: def clone_with_args( self, *, - objective: Objective | None = None, objectives: list[Objective] | None = None, preference_profile_name: str | None = None, outcome_constraints: list[OutcomeConstraint] | None = _NO_OUTCOME_CONSTRAINTS, @@ -718,16 +698,11 @@ def clone_with_args( | None = _NO_PRUNING_TARGET_PARAMETERIZATION, ) -> PreferenceOptimizationConfig: """Make a copy of this optimization config.""" - if objective is not None and objectives is not None: - raise UserInputError( - "Cannot specify both `objective` and `objectives` in clone_with_args." 
- ) - if objective is not None: - cloned_objectives = [objective] - elif objectives is not None: - cloned_objectives = objectives - else: - cloned_objectives = [obj.clone() for obj in self._objectives] + cloned_objectives = ( + [obj.clone() for obj in self._objectives] + if objectives is None + else objectives + ) preference_profile_name = ( self.preference_profile_name diff --git a/ax/core/tests/test_optimization_config.py b/ax/core/tests/test_optimization_config.py index c8b5c713883..d03143422f7 100644 --- a/ax/core/tests/test_optimization_config.py +++ b/ax/core/tests/test_optimization_config.py @@ -84,8 +84,6 @@ def test_Init(self) -> None: objective=self.objective, outcome_constraints=self.outcome_constraints ) self.assertEqual(str(config1), OC_STR) - with self.assertRaises(ValueError): - config1.objective = self.alt_objective # constrained Objective. # updating constraints is fine. config1.outcome_constraints = [self.outcome_constraint] self.assertEqual(len(config1.metric_names), 2) @@ -94,10 +92,6 @@ def test_Init(self) -> None: config2 = OptimizationConfig(objective=self.objective) self.assertEqual(config2.outcome_constraints, []) - # setting objective is fine too, if it's compatible with constraints.. - config2.objective = self.m2_objective - # setting constraints on objectives is fine for MultiObjective components. 
- config2.outcome_constraints = self.outcome_constraints self.assertEqual(config2.outcome_constraints, self.outcome_constraints) @@ -355,8 +349,8 @@ def test_objectives_kwarg_clone_and_repr(self) -> None: self.assertEqual(cloned.objectives[1].expression, "-m2") self.assertTrue(cloned.is_moo_problem) - # clone_with_args(objective=) replaces the list with a single objective - cloned = config.clone_with_args(objective=self.obj1) + # clone_with_args(objectives=) replaces the list with a single objective + cloned = config.clone_with_args(objectives=[self.obj1]) self.assertEqual(len(cloned.objectives), 1) self.assertFalse(cloned.is_moo_problem) @@ -366,10 +360,6 @@ def test_objectives_kwarg_clone_and_repr(self) -> None: self.assertEqual(len(cloned.objectives), 2) self.assertEqual(cloned.objectives[1].expression, "m3") - # objective= and objectives= are mutually exclusive in clone_with_args - with self.assertRaisesRegex(UserInputError, "Cannot specify both"): - config.clone_with_args(objective=self.obj1, objectives=[self.obj1]) - # repr always uses "objectives=" self.assertIn("objectives=", repr(config)) single_config = OptimizationConfig(objectives=[self.obj1]) @@ -815,7 +805,7 @@ def test_Clone(self) -> None: objectives=[self.objectives["o1"], self.objectives["o3"]] ) cloned_with_diff_objective = config.clone_with_args( - objective=different_objective + objectives=[different_objective] ) self.assertEqual( cloned_with_diff_objective.objective.expression, diff --git a/ax/service/tests/test_best_point.py b/ax/service/tests/test_best_point.py index 03393107221..78c48769aef 100644 --- a/ax/service/tests/test_best_point.py +++ b/ax/service/tests/test_best_point.py @@ -60,12 +60,14 @@ def test_get_trace(self) -> None: self.assertEqual(get_trace(exp), [11, 10, 9, 9, 5]) # Same experiment with maximize via new optimization config. 
- opt_conf = none_throws(exp.optimization_config).clone() - opt_conf.objective = Objective( - expression=opt_conf.objective.metric_names[0], - metric_name_to_signature={ - opt_conf.objective.metric_names[0]: opt_conf.objective.metric_names[0] - }, + metric_name = none_throws(exp.optimization_config).objective.metric_names[0] + opt_conf = none_throws(exp.optimization_config).clone_with_args( + objectives=[ + Objective( + expression=metric_name, + metric_name_to_signature={metric_name: metric_name}, + ) + ], ) self.assertEqual(get_trace(exp, opt_conf), [11, 11, 11, 15, 15]) @@ -441,12 +443,14 @@ def test_get_best_observed_value(self) -> None: ) self.assertEqual(get_best(exp), 5) # Same experiment with maximize via new optimization config. - opt_conf = none_throws(exp.optimization_config).clone() - opt_conf.objective = Objective( - expression=opt_conf.objective.metric_names[0], - metric_name_to_signature={ - opt_conf.objective.metric_names[0]: opt_conf.objective.metric_names[0] - }, + metric_name = none_throws(exp.optimization_config).objective.metric_names[0] + opt_conf = none_throws(exp.optimization_config).clone_with_args( + objectives=[ + Objective( + expression=metric_name, + metric_name_to_signature={metric_name: metric_name}, + ) + ], ) self.assertEqual(get_best(exp, opt_conf), 15) diff --git a/ax/service/tests/test_best_point_utils.py b/ax/service/tests/test_best_point_utils.py index dc597cd2006..c8b80851188 100644 --- a/ax/service/tests/test_best_point_utils.py +++ b/ax/service/tests/test_best_point_utils.py @@ -615,7 +615,9 @@ def test_best_raw_objective_point_scalarized(self) -> None: exp = get_branin_experiment() gs = choose_generation_strategy_legacy(search_space=exp.search_space) exp.optimization_config = OptimizationConfig( - objective=ScalarizedObjective(metrics=[get_branin_metric()], minimize=True) + objectives=[ + ScalarizedObjective(metrics=[get_branin_metric()], minimize=True) + ], ) with self.assertRaisesRegex(ValueError, "Cannot identify best "): 
get_best_raw_objective_point_with_trial_index(exp) @@ -637,11 +639,16 @@ def test_best_raw_objective_point_scalarized_multi(self) -> None: exp = get_branin_experiment() gs = choose_generation_strategy_legacy(search_space=exp.search_space) exp.optimization_config = OptimizationConfig( - objective=ScalarizedObjective( - metrics=[get_branin_metric(), get_branin_metric(lower_is_better=False)], - weights=[0.1, -0.9], - minimize=True, - ) + objectives=[ + ScalarizedObjective( + metrics=[ + get_branin_metric(), + get_branin_metric(lower_is_better=False), + ], + weights=[0.1, -0.9], + minimize=True, + ) + ], ) with self.assertRaisesRegex(ValueError, "Cannot identify best "): get_best_raw_objective_point_with_trial_index(experiment=exp) @@ -1037,11 +1044,13 @@ def test_best_parameters_from_model_predictions_scalarized(self) -> None: ) exp.add_tracking_metric(metric2) exp.optimization_config = OptimizationConfig( - objective=ScalarizedObjective( - metrics=[metric1, metric2], - weights=[0.5, 0.5], - minimize=True, - ) + objectives=[ + ScalarizedObjective( + metrics=[metric1, metric2], + weights=[0.5, 0.5], + minimize=True, + ) + ], ) # Run trials and generate data diff --git a/ax/storage/sqa_store/tests/test_sqa_store.py b/ax/storage/sqa_store/tests/test_sqa_store.py index f7b98b8d0a5..f310fbeba6b 100644 --- a/ax/storage/sqa_store/tests/test_sqa_store.py +++ b/ax/storage/sqa_store/tests/test_sqa_store.py @@ -1270,9 +1270,10 @@ def test_experiment_objective_updates(self) -> None: # update objective # (should perform update in place) - optimization_config = get_optimization_config() objective = get_objective(minimize=True) - optimization_config.objective = objective + optimization_config = get_optimization_config().clone_with_args( + objectives=[objective] + ) experiment.optimization_config = optimization_config save_experiment(experiment) self.assertEqual( @@ -1282,8 +1283,8 @@ def test_experiment_objective_updates(self) -> None: # replace objective # (old one should become 
tracking metric) experiment.add_tracking_metric(Metric(name="objective")) - optimization_config.objective = Objective( - metric=Metric(name="objective"), minimize=False + optimization_config = optimization_config.clone_with_args( + objectives=[Objective(metric=Metric(name="objective"), minimize=False)] ) experiment.optimization_config = optimization_config save_experiment(experiment) From 7b29e18ecb44cc5551619bed0e9c7eb9f9ab80c8 Mon Sep 17 00:00:00 2001 From: Sait Cakmak Date: Tue, 7 Apr 2026 11:19:28 -0700 Subject: [PATCH 4/5] Migrate production OptimizationConfig(objective=) to objectives=[] Summary: Migrate all production (non-test, non-stubs) callers of OptimizationConfig(objective=...) to use the new objectives=[...] parameter. This is part of the phased deprecation of the objective= keyword argument. Mechanical migration of all base OptimizationConfig callers in core_stubs.py from the deprecated objective= kwarg to objectives=[...]. MultiObjectiveOptimizationConfig callers are not changed. This is part of a phased deprecation of the objective= parameter from OptimizationConfig.__init__. 
Differential Revision: D99504973 [Easy][Ax] Migrate core_stubs.py from objective= to objectives=[] D99506123 --- ax/api/utils/instantiation/from_string.py | 2 +- ax/benchmark/benchmark_problem.py | 2 +- .../surrogate/hss/cifar10_surrogate.py | 18 +++---- .../surrogate/hss/fashion_mnist_surrogate.py | 18 +++---- .../problems/surrogate/hss/mnist_surrogate.py | 18 +++---- ax/early_stopping/experiment_replay.py | 2 +- ax/service/utils/instantiation.py | 2 +- ax/utils/preference/preference_utils.py | 2 +- ax/utils/testing/core_stubs.py | 48 ++++++++++--------- 9 files changed, 61 insertions(+), 51 deletions(-) diff --git a/ax/api/utils/instantiation/from_string.py b/ax/api/utils/instantiation/from_string.py index 5b2ebfb787b..2a15b5f90d9 100644 --- a/ax/api/utils/instantiation/from_string.py +++ b/ax/api/utils/instantiation/from_string.py @@ -97,6 +97,6 @@ def optimization_config_from_string( ) return OptimizationConfig( - objective=objective, + objectives=[objective], outcome_constraints=outcome_constraints, ) diff --git a/ax/benchmark/benchmark_problem.py b/ax/benchmark/benchmark_problem.py index 2e07f1aa279..042d5b379a1 100644 --- a/ax/benchmark/benchmark_problem.py +++ b/ax/benchmark/benchmark_problem.py @@ -334,7 +334,7 @@ def get_soo_opt_config( ) config = OptimizationConfig( - objective=objective, outcome_constraints=outcome_constraints + objectives=[objective], outcome_constraints=outcome_constraints ) return config, [obj_metric] + constraint_metrics diff --git a/ax/benchmark/problems/surrogate/hss/cifar10_surrogate.py b/ax/benchmark/problems/surrogate/hss/cifar10_surrogate.py index 3d5928abf3d..04364dc78d8 100644 --- a/ax/benchmark/problems/surrogate/hss/cifar10_surrogate.py +++ b/ax/benchmark/problems/surrogate/hss/cifar10_surrogate.py @@ -182,14 +182,16 @@ def get_cifar10_surrogate_benchmark( ) optimization_config = OptimizationConfig( - objective=Objective( - metric=BenchmarkMetric( - name="CIFAR10 Test Accuracy", - lower_is_better=False, - 
observe_noise_sd=False, - ), - minimize=False, - ) + objectives=[ + Objective( + metric=BenchmarkMetric( + name="CIFAR10 Test Accuracy", + lower_is_better=False, + observe_noise_sd=False, + ), + minimize=False, + ) + ], ) return BenchmarkProblem( diff --git a/ax/benchmark/problems/surrogate/hss/fashion_mnist_surrogate.py b/ax/benchmark/problems/surrogate/hss/fashion_mnist_surrogate.py index f2efc48b65b..3a20512a210 100644 --- a/ax/benchmark/problems/surrogate/hss/fashion_mnist_surrogate.py +++ b/ax/benchmark/problems/surrogate/hss/fashion_mnist_surrogate.py @@ -182,14 +182,16 @@ def get_fashion_mnist_surrogate_benchmark( ) optimization_config = OptimizationConfig( - objective=Objective( - metric=BenchmarkMetric( - name="FashionMNIST Test Accuracy", - lower_is_better=False, - observe_noise_sd=False, - ), - minimize=False, - ) + objectives=[ + Objective( + metric=BenchmarkMetric( + name="FashionMNIST Test Accuracy", + lower_is_better=False, + observe_noise_sd=False, + ), + minimize=False, + ) + ], ) return BenchmarkProblem( diff --git a/ax/benchmark/problems/surrogate/hss/mnist_surrogate.py b/ax/benchmark/problems/surrogate/hss/mnist_surrogate.py index 7235ed9c3d7..52c9131cb6a 100644 --- a/ax/benchmark/problems/surrogate/hss/mnist_surrogate.py +++ b/ax/benchmark/problems/surrogate/hss/mnist_surrogate.py @@ -150,14 +150,16 @@ def get_mnist_surrogate_benchmark( ) optimization_config = OptimizationConfig( - objective=Objective( - metric=BenchmarkMetric( - name="MNIST Test Accuracy", - lower_is_better=False, - observe_noise_sd=False, - ), - minimize=False, - ) + objectives=[ + Objective( + metric=BenchmarkMetric( + name="MNIST Test Accuracy", + lower_is_better=False, + observe_noise_sd=False, + ), + minimize=False, + ) + ], ) return BenchmarkProblem( diff --git a/ax/early_stopping/experiment_replay.py b/ax/early_stopping/experiment_replay.py index a6b81b7226e..e9cc96dcc7c 100644 --- a/ax/early_stopping/experiment_replay.py +++ b/ax/early_stopping/experiment_replay.py @@ 
-69,7 +69,7 @@ def replay_experiment( lower_is_better=metric.lower_is_better, ) optimization_config = OptimizationConfig( - objective=Objective(metric=replay_metric), + objectives=[Objective(metric=replay_metric)], ) runner = MapDataReplayRunner(replay_metric=replay_metric) diff --git a/ax/service/utils/instantiation.py b/ax/service/utils/instantiation.py index 0b9f017cf76..6df082bc2c7 100644 --- a/ax/service/utils/instantiation.py +++ b/ax/service/utils/instantiation.py @@ -626,7 +626,7 @@ def optimization_config_from_objectives( "thresholds." ) return OptimizationConfig( - objective=objectives[0], + objectives=[objectives[0]], outcome_constraints=outcome_constraints, ) diff --git a/ax/utils/preference/preference_utils.py b/ax/utils/preference/preference_utils.py index e69c2fa6ea6..108fa4b6130 100644 --- a/ax/utils/preference/preference_utils.py +++ b/ax/utils/preference/preference_utils.py @@ -57,7 +57,7 @@ def get_preference_adapter( # in the data. Requires optimization_config to specify which metrics to use. pref_metric = Metric(name=Keys.PAIRWISE_PREFERENCE_QUERY.value) optimization_config = OptimizationConfig( - objective=Objective(metric=pref_metric, minimize=False) + objectives=[Objective(metric=pref_metric, minimize=False)] ) # Register the metric on the experiment if not already present. # This is required for _extract_observation_data filtering in TorchAdapter. 
diff --git a/ax/utils/testing/core_stubs.py b/ax/utils/testing/core_stubs.py index 6c452acbd6b..03cca63d20a 100644 --- a/ax/utils/testing/core_stubs.py +++ b/ax/utils/testing/core_stubs.py @@ -254,7 +254,7 @@ def get_experiment_with_custom_runner_and_metric( outcome_constraints.append(custom_scalarized_constraint) optimization_config = OptimizationConfig( - objective=custom_scalarized_objective, + objectives=[custom_scalarized_objective], outcome_constraints=outcome_constraints, ) else: @@ -573,10 +573,12 @@ def get_branin_experiment_with_timestamp_map_metric( else: # single objective case optimization_config = OptimizationConfig( - objective=Objective( - metric=local_get_map_metric(name="branin_map"), - minimize=True, - ), + objectives=[ + Objective( + metric=local_get_map_metric(name="branin_map"), + minimize=True, + ) + ], outcome_constraints=outcome_constraints, ) @@ -676,7 +678,7 @@ def get_multi_type_experiment( add_trial_type: bool = True, add_trials: bool = False, num_arms: int = 10 ) -> MultiTypeExperiment: oc = OptimizationConfig( - objective=Objective(metric=BraninMetric("m1", ["x1", "x2"]), minimize=True) + objectives=[Objective(metric=BraninMetric("m1", ["x1", "x2"]), minimize=True)] ) experiment = MultiTypeExperiment( name="test_exp", @@ -721,7 +723,7 @@ def get_factorial_experiment( search_space=get_factorial_search_space(), optimization_config=( OptimizationConfig( - objective=Objective(metric=get_factorial_metric(), minimize=False) + objectives=[Objective(metric=get_factorial_metric(), minimize=False)] ) if has_optimization_config else None @@ -1001,7 +1003,7 @@ def get_experiment_with_scalarized_objective_and_outcome_constraint() -> Experim get_scalarized_outcome_constraint(), ] optimization_config = OptimizationConfig( - objective=objective, outcome_constraints=outcome_constraints + objectives=[objective], outcome_constraints=outcome_constraints ) experiment = Experiment( name="test_experiment_scalarized_objective and outcome constraint", @@ 
-1110,7 +1112,7 @@ def get_experiment_with_observations( tracking_metrics_from_opt_config = list(metrics) if scalarized: optimization_config = OptimizationConfig( - objective=ScalarizedObjective(metrics) + objectives=[ScalarizedObjective(metrics)] ) if constrained: raise NotImplementedError @@ -1180,10 +1182,10 @@ def get_experiment_with_observations( relative=False, ) optimization_config = OptimizationConfig( - objective=objective, outcome_constraints=[constraint] + objectives=[objective], outcome_constraints=[constraint] ) else: - optimization_config = OptimizationConfig(objective=objective) + optimization_config = OptimizationConfig(objectives=[objective]) else: tracking_metrics_from_opt_config = [] search_space = search_space or get_search_space_for_range_values(min=0.0, max=1.0) @@ -1281,13 +1283,15 @@ def get_high_dimensional_branin_experiment( sq_parameters = {f"x{i}": 1.0 if i < 25 else 2.0 for i in range(50)} optimization_config = OptimizationConfig( - objective=Objective( - metric=BraninMetric( - name="objective", - param_names=["x19", "x44"], - ), - minimize=True, - ) + objectives=[ + Objective( + metric=BraninMetric( + name="objective", + param_names=["x19", "x44"], + ), + minimize=True, + ) + ] ) exp = Experiment( @@ -2286,13 +2290,13 @@ def get_optimization_config( [get_outcome_constraint(relative=relative)] if outcome_constraint else [] ) return OptimizationConfig( - objective=objective, outcome_constraints=outcome_constraints + objectives=[objective], outcome_constraints=outcome_constraints ) def get_map_optimization_config() -> OptimizationConfig: objective = get_map_objective() - return OptimizationConfig(objective=objective) + return OptimizationConfig(objectives=[objective]) def get_multi_objective_optimization_config( @@ -2330,7 +2334,7 @@ def get_optimization_config_no_constraints( minimize: bool = False, ) -> OptimizationConfig: return OptimizationConfig( - objective=Objective(metric=Metric("test_metric"), minimize=minimize) + 
objectives=[Objective(metric=Metric("test_metric"), minimize=minimize)] ) @@ -2353,7 +2357,7 @@ def get_branin_optimization_config( ) ) return OptimizationConfig( - objective=get_branin_objective(minimize=minimize), + objectives=[get_branin_objective(minimize=minimize)], outcome_constraints=outcome_constraint, ) From 533fa184311cd616ed516b0ef7c376174255a6fd Mon Sep 17 00:00:00 2001 From: Sait Cakmak Date: Tue, 7 Apr 2026 13:29:50 -0700 Subject: [PATCH 5/5] Migrate core/adapter test callers from objective= to objectives=[] Summary: Mechanical migration of base OptimizationConfig callers in test files under ax/core/tests/ and ax/adapter/transforms/tests/ from the deprecated objective= kwarg to objectives=[...]. MultiObjectiveOptimizationConfig callers are not changed. Files changed: - ax/core/tests/test_optimization_config.py - ax/core/tests/test_derived_metric.py - ax/core/tests/test_experiment.py - ax/core/tests/test_multi_type_experiment.py - ax/core/tests/test_utils.py - ax/adapter/transforms/tests/test_log_y_transform.py - ax/adapter/transforms/tests/test_power_y_transform.py - ax/adapter/transforms/tests/test_standardize_y_transform.py - ax/adapter/transforms/tests/test_stratified_standardize_y_transform.py Differential Revision: D99506710 --- .../transforms/tests/test_log_y_transform.py | 12 +++---- .../tests/test_power_y_transform.py | 14 ++++---- .../tests/test_standardize_y_transform.py | 14 ++++---- ...test_stratified_standardize_y_transform.py | 16 ++++----- ax/core/tests/test_derived_metric.py | 4 +-- ax/core/tests/test_experiment.py | 28 +++++++++------ ax/core/tests/test_multi_type_experiment.py | 2 +- ax/core/tests/test_optimization_config.py | 36 +++++++++---------- ax/core/tests/test_utils.py | 26 ++++++++------ 9 files changed, 82 insertions(+), 70 deletions(-) diff --git a/ax/adapter/transforms/tests/test_log_y_transform.py b/ax/adapter/transforms/tests/test_log_y_transform.py index b35ab67ab31..458efa66a25 100644 --- 
a/ax/adapter/transforms/tests/test_log_y_transform.py +++ b/ax/adapter/transforms/tests/test_log_y_transform.py @@ -138,14 +138,14 @@ def test_TransformOptimizationConfig(self) -> None: # basic test m1 = Metric(name="m1") objective_m1 = Objective(metric=m1, minimize=False) - oc = OptimizationConfig(objective=objective_m1, outcome_constraints=[]) + oc = OptimizationConfig(objectives=[objective_m1], outcome_constraints=[]) tf = LogY(search_space=None, config={"metrics": ["m1"]}) oc_tf = tf.transform_optimization_config(deepcopy(oc), None, None) self.assertEqual(oc_tf, oc) # output constraint on a different metric should work m2 = Metric(name="m2") oc = OptimizationConfig( - objective=objective_m1, + objectives=[objective_m1], outcome_constraints=[ get_outcome_constraint(metric=m2, bound=-1, relative=False) ], @@ -155,7 +155,7 @@ def test_TransformOptimizationConfig(self) -> None: # output constraint with a negative bound should fail objective_m2 = Objective(metric=m2, minimize=False) oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], outcome_constraints=[ get_outcome_constraint(metric=m1, bound=-1.234, relative=False) ], @@ -170,7 +170,7 @@ def test_TransformOptimizationConfig(self) -> None: ) # output constraint with a zero bound should also fail oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], outcome_constraints=[ get_outcome_constraint(metric=m1, bound=0, relative=False) ], @@ -185,7 +185,7 @@ def test_TransformOptimizationConfig(self) -> None: ) # output constraint with a positive bound should work oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], outcome_constraints=[ get_outcome_constraint(metric=m1, bound=2.345, relative=False) ], @@ -200,7 +200,7 @@ def test_TransformOptimizationConfig(self) -> None: self.assertEqual(oc_tf, oc) # output constraint with a relative bound should fail oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], 
outcome_constraints=[ get_outcome_constraint(metric=m1, bound=2.345, relative=True) ], diff --git a/ax/adapter/transforms/tests/test_power_y_transform.py b/ax/adapter/transforms/tests/test_power_y_transform.py index a6f967cfc3c..224b8093c16 100644 --- a/ax/adapter/transforms/tests/test_power_y_transform.py +++ b/ax/adapter/transforms/tests/test_power_y_transform.py @@ -207,7 +207,7 @@ def test_transform_optimization_config(self) -> None: # basic test m1 = Metric(name="m1") objective_m1 = Objective(metric=m1, minimize=False) - oc = OptimizationConfig(objective=objective_m1, outcome_constraints=[]) + oc = OptimizationConfig(objectives=[objective_m1], outcome_constraints=[]) tf = PowerTransformY( search_space=None, experiment_data=self.experiment_data, @@ -219,7 +219,7 @@ def test_transform_optimization_config(self) -> None: m2 = Metric(name="m2") for bound in [-1.234, 0, 2.345]: oc = OptimizationConfig( - objective=objective_m1, + objectives=[objective_m1], outcome_constraints=get_constraint( metric=m2, bound=bound, relative=False ), @@ -230,7 +230,7 @@ def test_transform_optimization_config(self) -> None: objective_m2 = Objective(metric=m2, minimize=False) for bound in [-1.234, 0, 2.345]: oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], outcome_constraints=get_constraint( metric=m1, bound=bound, relative=False ), @@ -259,7 +259,7 @@ def test_transform_optimization_config(self) -> None: self.assertAlmostEqual(c_actual.bound, c_expected.bound, places=5) # Relative constraints aren't supported oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], outcome_constraints=get_constraint(metric=m1, bound=2.345, relative=True), ) with self.assertRaisesRegex( @@ -277,7 +277,7 @@ def test_transform_optimization_config(self) -> None: # Support for scalarized outcome constraints isn't implemented m3 = Metric(name="m3") oc = OptimizationConfig( - objective=objective_m2, + objectives=[objective_m2], outcome_constraints=[ 
ScalarizedOutcomeConstraint( metrics=[m1, m3], op=ComparisonOp.GEQ, bound=2.345, relative=False @@ -295,7 +295,9 @@ def test_transform_optimization_config(self) -> None: scalarized_objective = ScalarizedObjective( metrics=[m1, m3], weights=[1.0, 2.0], minimize=False ) - oc = OptimizationConfig(objective=scalarized_objective, outcome_constraints=[]) + oc = OptimizationConfig( + objectives=[scalarized_objective], outcome_constraints=[] + ) with self.assertRaisesRegex(NotImplementedError, "ScalarizedObjective"): tf.transform_optimization_config(oc, None, None) diff --git a/ax/adapter/transforms/tests/test_standardize_y_transform.py b/ax/adapter/transforms/tests/test_standardize_y_transform.py index 6985b589328..92828b7b9ed 100644 --- a/ax/adapter/transforms/tests/test_standardize_y_transform.py +++ b/ax/adapter/transforms/tests/test_standardize_y_transform.py @@ -111,7 +111,7 @@ def test_TransformOptimizationConfig(self) -> None: ), ] - oc = OptimizationConfig(objective=objective, outcome_constraints=cons) + oc = OptimizationConfig(objectives=[objective], outcome_constraints=cons) with self.assertRaisesRegex( DataRequiredError, "`StandardizeY` transform requires constraint metric" ): @@ -125,7 +125,7 @@ def test_TransformOptimizationConfig(self) -> None: relative=False, ), ] - oc = OptimizationConfig(objective=objective, outcome_constraints=cons) + oc = OptimizationConfig(objectives=[objective], outcome_constraints=cons) with self.assertRaisesRegex( DataRequiredError, "`StandardizeY` transform requires constraint metric" ): @@ -145,7 +145,7 @@ def test_TransformOptimizationConfig(self) -> None: relative=False, ), ] - oc = OptimizationConfig(objective=objective, outcome_constraints=cons) + oc = OptimizationConfig(objectives=[objective], outcome_constraints=cons) oc = self.t.transform_optimization_config(oc, None, None) # Verify the transformed constraints have the expected values. 
# We compare properties individually to avoid floating-point string @@ -174,7 +174,7 @@ def test_TransformOptimizationConfig(self) -> None: con = OutcomeConstraint( metric=m1, op=ComparisonOp.GEQ, bound=2.0, relative=True ) - oc = OptimizationConfig(objective=objective, outcome_constraints=[con]) + oc = OptimizationConfig(objectives=[objective], outcome_constraints=[con]) with self.assertRaises(ValueError): oc = self.t.transform_optimization_config(oc, None, None) @@ -229,7 +229,7 @@ def test_TransformOptimizationConfigWithScalarizedObjective(self) -> None: objective = ScalarizedObjective( metrics=[m1, m2], weights=[0.5, 0.5], minimize=False ) - oc = OptimizationConfig(objective=objective) + oc = OptimizationConfig(objectives=[objective]) oc_transformed = self.t.transform_optimization_config(oc, None, None) # Check that weights are scaled by standard deviations @@ -245,7 +245,7 @@ def test_TransformOptimizationConfigWithScalarizedObjective(self) -> None: objective_missing = ScalarizedObjective( metrics=[m1, m3], weights=[0.5, 0.5], minimize=False ) - oc_missing = OptimizationConfig(objective=objective_missing) + oc_missing = OptimizationConfig(objectives=[objective_missing]) with self.assertRaisesRegex( DataRequiredError, "`StandardizeY` transform requires objective metric" ): @@ -255,7 +255,7 @@ def test_TransformOptimizationConfigWithScalarizedObjective(self) -> None: objective_minimize = ScalarizedObjective( metrics=[m1, m2], weights=[1.0, -2.0], minimize=True ) - oc_minimize = OptimizationConfig(objective=objective_minimize) + oc_minimize = OptimizationConfig(objectives=[objective_minimize]) oc_minimize_transformed = self.t.transform_optimization_config( oc_minimize, None, None ) diff --git a/ax/adapter/transforms/tests/test_stratified_standardize_y_transform.py b/ax/adapter/transforms/tests/test_stratified_standardize_y_transform.py index 52b1f1169bc..cd1c9235531 100644 --- a/ax/adapter/transforms/tests/test_stratified_standardize_y_transform.py +++ 
b/ax/adapter/transforms/tests/test_stratified_standardize_y_transform.py @@ -314,7 +314,7 @@ def test_TransformObservations(self) -> None: def test_TransformOptimizationConfig(self) -> None: cons2 = deepcopy(self.cons) - oc = OptimizationConfig(objective=self.objective, outcome_constraints=cons2) + oc = OptimizationConfig(objectives=[self.objective], outcome_constraints=cons2) fixed_features = ObservationFeatures({"z": "a"}) oc = self.t.transform_optimization_config(oc, None, fixed_features) # Verify constraint values approximately (expression-string equality @@ -330,7 +330,7 @@ def test_TransformOptimizationConfig(self) -> None: self.assertTrue(oc.objective == self.objective) # No constraints - oc2 = OptimizationConfig(objective=self.objective) + oc2 = OptimizationConfig(objectives=[self.objective]) oc3 = deepcopy(oc2) fixed_features = ObservationFeatures({"z": "a"}) oc3 = self.t.transform_optimization_config(oc3, None, fixed_features) @@ -340,7 +340,7 @@ def test_TransformOptimizationConfig(self) -> None: con = OutcomeConstraint( metric=self.m1, op=ComparisonOp.GEQ, bound=2.0, relative=True ) - oc = OptimizationConfig(objective=self.objective, outcome_constraints=[con]) + oc = OptimizationConfig(objectives=[self.objective], outcome_constraints=[con]) with self.assertRaises(ValueError): oc = self.t.transform_optimization_config(oc, None, fixed_features) # Fail without strat param fixed @@ -350,10 +350,10 @@ def test_TransformOptimizationConfig(self) -> None: def test_TransformOptimizationConfigWithStrataMapping(self) -> None: cons2 = deepcopy(self.cons) - oc = OptimizationConfig(objective=self.objective, outcome_constraints=cons2) + oc = OptimizationConfig(objectives=[self.objective], outcome_constraints=cons2) fixed_features = ObservationFeatures({"z": "a"}) cons2 = deepcopy(self.cons) - oc = OptimizationConfig(objective=self.objective, outcome_constraints=cons2) + oc = OptimizationConfig(objectives=[self.objective], outcome_constraints=cons2) oc = 
self.t2.transform_optimization_config(oc, None, fixed_features) # Verify constraint values approximately. self.assertEqual(len(oc.outcome_constraints), 2) @@ -367,7 +367,7 @@ def test_TransformOptimizationConfigWithStrataMapping(self) -> None: self.assertTrue(oc.objective == self.objective) fixed_features = ObservationFeatures({"z": "c"}) cons2 = deepcopy(self.cons) - oc = OptimizationConfig(objective=self.objective, outcome_constraints=cons2) + oc = OptimizationConfig(objectives=[self.objective], outcome_constraints=cons2) oc = self.t2.transform_optimization_config(oc, None, fixed_features) # Verify constraint values approximately. self.assertEqual(len(oc.outcome_constraints), 2) @@ -441,7 +441,7 @@ def test_TransformOptimizationConfigWithScalarizedObjective(self) -> None: objective = ScalarizedObjective( metrics=[self.m1, self.m2], weights=[0.5, 0.5], minimize=False ) - oc = OptimizationConfig(objective=objective) + oc = OptimizationConfig(objectives=[objective]) expected_weights = { "a": [0.5 * 1.0, 0.5 * sqrt(2) * 3], "b": [0.5 * 2 * sqrt(2), 0.5 * sqrt(2) * 0.5], @@ -476,7 +476,7 @@ def test_TransformAndUntransformScalarizedOutcomeConstraint(self) -> None: relative=False, ) oc = OptimizationConfig( - objective=self.objective, outcome_constraints=[scalarized_constraint] + objectives=[self.objective], outcome_constraints=[scalarized_constraint] ) fixed_features = ObservationFeatures({"z": "a"}) oc_transformed = self.t.transform_optimization_config(oc, None, fixed_features) diff --git a/ax/core/tests/test_derived_metric.py b/ax/core/tests/test_derived_metric.py index 3e05c279d33..2443207a69d 100644 --- a/ax/core/tests/test_derived_metric.py +++ b/ax/core/tests/test_derived_metric.py @@ -460,7 +460,7 @@ def test_two_phase_experiment_fetch(self) -> None: name="test", search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - objective=Objective(metric=Metric(name="obj"), minimize=True), + objectives=[Objective(metric=Metric(name="obj"), 
minimize=True)], outcome_constraints=[ OutcomeConstraint( metric=derived, @@ -941,7 +941,7 @@ def test_experiment_integration(self) -> None: name="test", search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - objective=Objective(metric=Metric(name="obj"), minimize=True), + objectives=[Objective(metric=Metric(name="obj"), minimize=True)], outcome_constraints=[ OutcomeConstraint( metric=derived_ratio, diff --git a/ax/core/tests/test_experiment.py b/ax/core/tests/test_experiment.py index 31432d5a3ef..53ac489b450 100644 --- a/ax/core/tests/test_experiment.py +++ b/ax/core/tests/test_experiment.py @@ -305,7 +305,9 @@ def test_metric_setters(self) -> None: # Add a new metric and set optimization config using it as constraint self.experiment.add_metric(Metric(name="m3")) opt_config = OptimizationConfig( - objective=Objective(expression="m1", metric_name_to_signature={"m1": "m1"}), + objectives=[ + Objective(expression="m1", metric_name_to_signature={"m1": "m1"}) + ], outcome_constraints=[ OutcomeConstraint( expression="m3 >= -0.25 * baseline", @@ -558,7 +560,9 @@ def test_optimization_config_setter(self) -> None: # Setting an opt config with an unregistered metric should raise new_opt_config = OptimizationConfig( - objective=Objective(expression="m1", metric_name_to_signature={"m1": "m1"}), + objectives=[ + Objective(expression="m1", metric_name_to_signature={"m1": "m1"}) + ], outcome_constraints=[ OutcomeConstraint( expression="unknown_metric >= 0.5", @@ -930,7 +934,7 @@ def test_attach(self) -> None: name="test", search_space=get_branin_search_space(), optimization_config=OptimizationConfig( - objective=Objective(metric=Metric(name="a", lower_is_better=True)) + objectives=[Objective(metric=Metric(name="a", lower_is_better=True))] ), tracking_metrics=[Metric(name="b"), Metric(name="c")], runner=SyntheticRunner(), @@ -1024,7 +1028,7 @@ def test_lookup_data(self) -> None: name="test", search_space=get_branin_search_space(), 
optimization_config=OptimizationConfig( - objective=Objective(metric=Metric(name="a", lower_is_better=True)) + objectives=[Objective(metric=Metric(name="a", lower_is_better=True))] ), tracking_metrics=[Metric(name="b"), Metric(name="c")], runner=SyntheticRunner(), @@ -1676,13 +1680,15 @@ def test_metric_summary_df_scalarized_objective(self) -> None: name="test_experiment", search_space=SearchSpace(parameters=[]), optimization_config=OptimizationConfig( - objective=Objective( - expression="2*metric_a + -3*metric_b", - metric_name_to_signature={ - "metric_a": "metric_a", - "metric_b": "metric_b", - }, - ), + objectives=[ + Objective( + expression="2*metric_a + -3*metric_b", + metric_name_to_signature={ + "metric_a": "metric_a", + "metric_b": "metric_b", + }, + ) + ], ), tracking_metrics=[ Metric(name="metric_a", lower_is_better=False), diff --git a/ax/core/tests/test_multi_type_experiment.py b/ax/core/tests/test_multi_type_experiment.py index 7775cea3f5b..1b351067cea 100644 --- a/ax/core/tests/test_multi_type_experiment.py +++ b/ax/core/tests/test_multi_type_experiment.py @@ -154,7 +154,7 @@ def test_setting_opt_config(self) -> None: m3 = BraninMetric("m3", ["x1", "x2"]) self.experiment.add_tracking_metric(m3) self.experiment.optimization_config = OptimizationConfig( - objective=Objective(metric=m3, minimize=True) + objectives=[Objective(metric=m3, minimize=True)] ) self.assertDictEqual( self.experiment._metric_to_trial_type, diff --git a/ax/core/tests/test_optimization_config.py b/ax/core/tests/test_optimization_config.py index d03143422f7..b88a440930f 100644 --- a/ax/core/tests/test_optimization_config.py +++ b/ax/core/tests/test_optimization_config.py @@ -81,7 +81,7 @@ def setUp(self) -> None: def test_Init(self) -> None: config1 = OptimizationConfig( - objective=self.objective, outcome_constraints=self.outcome_constraints + objectives=[self.objective], outcome_constraints=self.outcome_constraints ) self.assertEqual(str(config1), OC_STR) # updating constraints is 
fine. @@ -89,7 +89,7 @@ def test_Init(self) -> None: self.assertEqual(len(config1.metric_names), 2) # objective without outcome_constraints is also supported - config2 = OptimizationConfig(objective=self.objective) + config2 = OptimizationConfig(objectives=[self.objective]) self.assertEqual(config2.outcome_constraints, []) config2.outcome_constraints = self.outcome_constraints @@ -97,11 +97,11 @@ def test_Init(self) -> None: def test_Eq(self) -> None: config1 = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=self.outcome_constraints, ) config2 = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=self.outcome_constraints, ) self.assertEqual(config1, config2) @@ -112,15 +112,15 @@ def test_Eq(self) -> None: metric=self.metrics["m2"], op=ComparisonOp.LEQ, bound=0.5 ) config3 = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=[self.outcome_constraint, new_outcome_constraint], ) self.assertNotEqual(config1, config3) def test_ConstraintValidation(self) -> None: - # Can build OptimizationConfig with MultiObjective + # Can't build OptimizationConfig with MultiObjective with self.assertRaises(ValueError): - OptimizationConfig(objective=self.multi_objective) + OptimizationConfig(objectives=[self.multi_objective]) # Can't constrain on objective metric. with warnings.catch_warnings(): @@ -130,14 +130,14 @@ def test_ConstraintValidation(self) -> None: ) with self.assertRaises(ValueError): OptimizationConfig( - objective=self.objective, outcome_constraints=[objective_constraint] + objectives=[self.objective], outcome_constraints=[objective_constraint] ) # Using an outcome constraint for ScalarizedObjective should also raise with self.assertRaisesRegex( ValueError, "Cannot constrain on objective metric." 
): OptimizationConfig( - objective=self.m2_objective, + objectives=[self.m2_objective], outcome_constraints=[objective_constraint], ) # Two outcome_constraints on the same metric with the same op @@ -151,7 +151,7 @@ def test_ConstraintValidation(self) -> None: ) with self.assertRaises(ValueError): OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=[self.outcome_constraint, duplicate_constraint], ) @@ -166,7 +166,7 @@ def test_ConstraintValidation(self) -> None: ) with self.assertRaises(ValueError): OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=self.outcome_constraints + [opposing_constraint], ) @@ -183,7 +183,7 @@ def test_ConstraintValidation(self) -> None: ) with self.assertRaises(ValueError): OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=([self.outcome_constraint, opposing_constraint]), ) @@ -198,7 +198,7 @@ def test_ConstraintValidation(self) -> None: bound=self.outcome_constraint.bound + 1, ) config = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=([self.outcome_constraint, opposing_constraint]), ) self.assertEqual( @@ -208,7 +208,7 @@ def test_ConstraintValidation(self) -> None: # Test with ScalarizedOutcomeConstraint # should work when not constraining obj config_with_scalarized = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=[self.scalarized_outcome_constraint], ) self.assertEqual(len(config_with_scalarized.outcome_constraints), 1) @@ -230,24 +230,24 @@ def test_ConstraintValidation(self) -> None: ValueError, "Cannot constrain on objective metric." 
): OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=[scalarized_with_objective_metric], ) def test_Clone(self) -> None: config1 = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=self.outcome_constraints, ) self.assertEqual(config1, config1.clone()) def test_CloneWithArgs(self) -> None: config1 = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], outcome_constraints=self.outcome_constraints, ) config2 = OptimizationConfig( - objective=self.objective, + objectives=[self.objective], ) # Empty args produce exact clone diff --git a/ax/core/tests/test_utils.py b/ax/core/tests/test_utils.py index af1aba3d414..75cb234ff04 100644 --- a/ax/core/tests/test_utils.py +++ b/ax/core/tests/test_utils.py @@ -888,10 +888,12 @@ def test_get_target_trial_index_excludes_lilo_trials(self) -> None: ) exp.add_tracking_metric(pairwise_metric) exp.optimization_config = OptimizationConfig( - objective=Objective( - expression=pairwise_name, - metric_name_to_signature={pairwise_name: pairwise_name}, - ), + objectives=[ + Objective( + expression=pairwise_name, + metric_name_to_signature={pairwise_name: pairwise_name}, + ) + ], ) exp.llm_messages = [LLMMessage(role="system", content="test")] @@ -947,10 +949,12 @@ def test_is_lilo_experiment(self) -> None: ) exp.add_tracking_metric(pairwise_metric) exp.optimization_config = OptimizationConfig( - objective=Objective( - expression=pairwise_name, - metric_name_to_signature={pairwise_name: pairwise_name}, - ), + objectives=[ + Objective( + expression=pairwise_name, + metric_name_to_signature={pairwise_name: pairwise_name}, + ) + ], ) self.assertFalse(is_lilo_experiment(exp)) @@ -1049,7 +1053,7 @@ def test_custom_optimization_config(self) -> None: # Custom config requiring only "branin": COMPLETE. 
custom_config = OptimizationConfig( - objective=Objective(metric=Metric(name="branin"), minimize=False), + objectives=[Objective(metric=Metric(name="branin"), minimize=False)], ) result = compute_metric_availability( experiment=exp, optimization_config=custom_config @@ -1058,7 +1062,7 @@ def test_custom_optimization_config(self) -> None: # Custom config requiring an unrelated metric: INCOMPLETE. other_config = OptimizationConfig( - objective=Objective(metric=Metric(name="branin"), minimize=False), + objectives=[Objective(metric=Metric(name="branin"), minimize=False)], outcome_constraints=[ OutcomeConstraint( metric=Metric(name="other_metric"), @@ -1118,7 +1122,7 @@ def test_curve_data(self) -> None: exp.add_metric(Metric(name="metric_a")) exp.add_metric(Metric(name="metric_b")) exp.optimization_config = OptimizationConfig( - objective=Objective(metric=Metric(name="metric_a"), minimize=False), + objectives=[Objective(metric=Metric(name="metric_a"), minimize=False)], outcome_constraints=[ OutcomeConstraint( metric=Metric(name="metric_b"),