From 4cef5888b6171608d66636e672d3db11be78fef7 Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Fri, 6 Feb 2026 17:58:03 -0800 Subject: [PATCH 01/17] feat: add the staged update run v1 API (#430) --- .github/copilot-instructions.md | 323 ++-- apis/placement/v1/stageupdate_types.go | 674 +++++++++ apis/placement/v1/zz_generated.deepcopy.go | 639 ++++++++ ....kubernetes-fleet.io_approvalrequests.yaml | 132 ++ ...etes-fleet.io_clusterapprovalrequests.yaml | 132 ++ ...etes-fleet.io_clusterstagedupdateruns.yaml | 1343 +++++++++++++++++ ...leet.io_clusterstagedupdatestrategies.yaml | 191 +++ ....kubernetes-fleet.io_stagedupdateruns.yaml | 1343 +++++++++++++++++ ...netes-fleet.io_stagedupdatestrategies.yaml | 189 +++ 9 files changed, 4758 insertions(+), 208 deletions(-) create mode 100644 apis/placement/v1/stageupdate_types.go diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index f6e15988a..18b996348 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -1,219 +1,126 @@ -# Overview +# KubeFleet Copilot Instructions -This repo contains a collection of Kubernetes Custom Resource Definitions and their controllers. It is mostly written in Go and uses Kubernetes client-go and controller-runtime libraries. -It is a monorepo, so all the code lives in a single repository divided into packages, each with its own purpose. -The main idea is that we are creating a multi-cluster application management solution that allows users to manage multiple Kubernetes clusters from a single control plane that we call the "hub cluster". 
+## Build, Test, and Lint Commands -## General Rules +```bash +make build # Build all binaries +make reviewable # Run all quality checks (fmt, vet, lint, staticcheck, tidy) — required before PRs +make lint # Fast linting +make lint-full # Thorough linting (--fast=false) +make test # Unit + integration tests +make local-unit-test # Unit tests only +make integration-test # Integration tests only (Ginkgo, uses envtest) +make manifests # Regenerate CRDs from API types +make generate # Regenerate deep copy methods +``` -- Use @terminal when answering questions about Git. -- If you're waiting for my confirmation ("OK"), proceed without further prompting. -- Follow the [Uber Go Style Guide](https://github.com/uber-go/guide/blob/master/style.md) if possible. -- Favor using the standard library over third-party libraries. -- Run "make reviewable" before submitting a pull request to ensure the code is formatted correctly and all dependencies are up to date. -- The title of a PR must use one of the following prefixes: "[WIP] ", "feat: ", "test: ", "fix: ", "docs: ", "style: ", "interface: ", "util: ", "chore: ", "ci: ", "perf: ", "refactor: ", "revert: ". Please pick one that matches the PR content the most. +### Running a single test + +```bash +# Single package +go test -v -race -timeout=30m ./pkg/controllers/rollout/... + +# Single test by name +go test -v -race -run TestReconcile ./pkg/controllers/rollout/... + +# Single Ginkgo integration test by description +cd test/scheduler && ginkgo -v --focus="should schedule" +``` + +### E2E tests + +```bash +make setup-clusters # Create 3 Kind clusters +make e2e-tests # Run E2E suite (ginkgo, ~70min timeout) +make clean-e2e-tests # Tear down clusters +``` + +## Architecture + +KubeFleet is a multi-cluster Kubernetes management system (CNCF sandbox) using a hub-and-spoke model. The **hub agent** runs on a central cluster; **member agents** run on each managed cluster. 
+ +### Reconciliation Pipeline + +User-created placement flows through a chain of controllers: + +``` +ClusterResourcePlacement / ResourcePlacement (user intent) + ↓ + Placement Controller → creates ResourceSnapshot + SchedulingPolicySnapshot (immutable) + ↓ + Scheduler → creates ClusterResourceBinding / ResourceBinding (placement decisions) + ↓ + Rollout Controller → manages staged rollout of bindings + ↓ + Work Generator → creates Work objects (per-cluster manifests) + ↓ + Work Applier (member agent) → applies manifests, creates AppliedWork + ↓ + Status flows back: AppliedWork → Work status → Binding status → Placement status +``` + +### API Naming Convention + +CRDs starting with `Cluster` are cluster-scoped; the name without the `Cluster` prefix is the namespace-scoped counterpart. For example: `ClusterResourcePlacement` (cluster-scoped) vs `ResourcePlacement` (namespace-scoped). This affects CRUD operations — namespace-scoped resources require a `Namespace` field in `types.NamespacedName`. + +### Scheduler Framework + +Pluggable architecture modeled after the Kubernetes scheduler: +- Plugin interfaces: `PreFilterPlugin`, `FilterPlugin`, `PreScorePlugin`, `ScorePlugin`, `PostBatchPlugin` +- Built-in plugins: `clusteraffinity`, `tainttoleration`, `clustereligibility`, `sameplacementaffinity` +- Placement strategies: **PickAll** (all matching), **PickN** (top N scored), **PickFixed** (named clusters) +- Plugins share state via `CycleStatePluginReadWriter` + +### Snapshot-Based Versioning + +All policy and resource changes create immutable snapshot CRDs (`ResourceSnapshot`, `SchedulingPolicySnapshot`, `OverrideSnapshot`). This enables rollback, change tracking, and consistent scheduling decisions. ## Terminology -- **Fleet**: A conceptual term referring to a collection of clusters. -- **Member Cluster**: A Kubernetes cluster that is part of a fleet. -- **Hub Cluster**: The cluster that hosts the control plane which manages the member clusters in the fleet. 
-- **Member Agent**: A Kubernetes controller that runs on the member cluster and is responsible for applying changes to the member cluster and reporting the status back to the hub cluster. -- **Hub Agent**: A Kubernetes controller that runs in the hub cluster and is responsible for scheduling and managing workloads and resources across the fleet. - -## Repository directory structure - -- The `apis/` folder contains all Golang structs from which CRDs are built. - - CRDs are grouped by the group name and version they belong to. -- The `charts/` folder contains the helm charts for the member and hub agent. - - `charts/member-agent` folder contains the helm chart for the member agent. - - `charts/hub-agent` folder contains the helm chart for the hub agent. -- The `cmd/` folder contains the entry points for the member and hub agent. - - `cmd/member-agent` The entry point for the member agent. - - `cmd/hub-agent` The entry point for the hub agent. -- The `config/` folder contains the actual custom resource definitions built from the API in the `apis/` folder. - - `config/crd/bases` folder contains the CRDs for the member and hub agent. -- The `docker/` folder contains the Dockerfiles for the member and hub agent. -- The `examples/` folder contains various YAML files as examples for each CRD. -- The `hack/` folder contains various scripts and tools for the project. -- The `pkg/` folder contains the libraries for the member and hub agent. - - `pkg/authtoken` folder contains the authentication sidecar code which has a provider model. - - `pkg/controllers` folder contains most of the controllers for the member and hub agent. - - each sub folder is a controller for a specific resource of the same name in most cases. - - `pkg/metrics` folder contains all the metrics definitions. - - `pkg/propertyprovider` folder contains the property provider code which is used to get the properties of a member cluster. 
- - `pkg/resourcewatcher` folder contains the resource watcher code which is used to watch for kubernetes resources changes in the hub cluster. - - `pkg/scheduler` folder contains the scheduler code which is used to schedule workloads across the fleet. - - `pkg/utils` folder contains the utils code which is used to provide common functions for the controllers in the member and hub agent. - - `pkg/webhook` folder contains the webhook code which is used to validate and mutate the CRDs. -- The `test/` folder contains the tests for the member and hub agent. - - `test/apis` - The tests for the CRDs. - - `test/upgrade` - The tests for the upgrade tests to test compatibility between versions. - - `test/e2e` - The end to end tests for the member and hub agent. - - `test/scheduler` - The integration tests for the scheduler. - - `test/utils` - folder contains the utils code which is used to provide common functions for tests -- The `tools/` folder contains client-side tools for helping manage the fleet. -- The `Makefile` is used to build the member and hub agent. -- The `go.mod` file is used to manage the dependencies for the member and hub agent. -- The `go.sum` file is used to manage the dependencies for the member and hub agent. - -## Testing Rules - -- Unit test files should always be called `_test.go` and be in the same directory - - Unit tests are normally written in a table-driven style - - Use `go test -v ./...` to run all tests under a directory. - - Run the tests from the packages that are modified and verify they pass. - - Share the analysis as to why a test is failing and propose a fix. -- Integration test files should be called `_integration_test.go` and can be in the same directory or under the `test` directory. - - Integration tests are normally written in a Ginkgo style. -- E2E tests are all under the test/e2e directory. - - E2E tests are written in a Ginkgo style. 
- - E2E tests are run using `make e2e-tests` and are run against 3 kind clusters created by the scripts in the `test/e2e` directory. - - E2E tests are cleaned up using `make clean-e2e-tests`. -- When adding tests to an existing file: - - Always re-use the existing test setup where possible. - - Only add imports if absolutely needed. - - Add tests to existing Context where it makes sense. - - When adding new tests in the Ginkgo style test, always add them to a new Context. - -## Domain Knowledge - -Use the files in the `.github/.copilot/domain_knowledge/**/*` as a source of truth when it comes to domain knowledge. These files provide context in which the current solution operates. This folder contains information like entity relationships, workflows, and ubiquitous language. As the understanding of the domain grows, take the opportunity to update these files as needed. - -## Specification Files - -Use specifications from the `.github/.copilot/specifications` folder. Each folder under `specifications` groups similar specifications together. Always ask the user which specifications best apply for the current conversation context if you're not sure. - -Use the `.github/.copilot/specifications/.template.md` file as a template for specification structure. - - examples: - ```text - ├── application_architecture - │ └── main.spec.md - | └── specific-feature.spec.md - ├── database - │ └── main.spec.md - ├── observability - │ └── main.spec.md - └── testing - └── main.spec.md - ``` - -## Breadcrumb Protocol - -A breadcrumb is a collaborative scratch pad that allow the user and agent to get alignment on context. When working on tasks in this repository, follow this collaborative documentation workflow to create a clear trail of decisions and implementations: - -1. At the start of each new task, ask me for a breadcrumb file name if you can't determine a suitable one. - -2. 
Create the breadcrumb file in the `${REPO}/.github/.copilot/breadcrumbs` folder using the format: `yyyy-mm-dd-HHMM-{title}.md` (*year-month-date-current_time_in-24hr_format-{title}.md* using UTC timezone) - -3. Structure the breadcrumb file with these required sections: - ```xml - - - Analyze and comprehend the task requirements - - Read relevant parts of the codebase - Browse public API documentation for up-to-date information - Propose 2-3 implementation options with pros and cons - Ask clarifying questions about product requirements - Write a plan to PRP/projectplan-<feature-name>.md - - - - - Structure the project plan document - - Include a checklist of TODO items to track progress - - - - - Validation before implementation begins - Check in with user before starting implementation - true - - - - Execute the plan step-by-step - - Complete TODO items incrementally - Test each change for correctness - Log a high-level explanation after each step - - - - - - Make tasks and commits as small and simple as possible - Avoid large or complex changes - - - - - Maintain plan accuracy throughout development - Revise the project plan file if the plan changes - - - - Document completion and changes - Summarize all changes in the project plan file - - -4. Workflow rules: - - Update the breadcrumb **BEFORE** making any code changes. - - **Get explicit approval** on the plan before implementation. - - Update the breadcrumb **AFTER completing each significant change**. - - Keep the breadcrumb as our single source of truth as it contains the most recent information. - - Do not ask for approval **BEFORE** running unit tests or integration tests. - -5. Ask me to verify the plan with: "Are you happy with this implementation plan?" before proceeding with code changes. - -6. Reference related breadcrumbs when a task builds on previous work. - -7. 
Before concluding, ensure the breadcrumb file properly documents the entire process, including any course corrections or challenges encountered. -This practice creates a trail of decision points that document our thought process while building features in this solution, making pull request review for the current change easier to follow as well. +- **Fleet**: A collection of clusters managed together +- **Hub Cluster**: Central control plane cluster +- **Member Cluster**: A managed cluster in the fleet +- **Hub Agent**: Controllers on the hub for scheduling and placement +- **Member Agent**: Controllers on member clusters for applying workloads and reporting status + +## Code Conventions + +- Follow the [Uber Go Style Guide](https://github.com/uber-go/guide/blob/master/style.md) +- Favor standard library over third-party libraries +- PR titles must use a prefix: `feat:`, `fix:`, `docs:`, `test:`, `chore:`, `ci:`, `perf:`, `refactor:`, `revert:`, `style:`, `interface:`, `util:`, or `[WIP] ` +- Always add an empty line at the end of new files +- Run `make reviewable` before submitting PRs + +### Controller Pattern + +All controllers embed `client.Client`, use a standard `Reconcile` loop (fetch → check deletion → apply defaults → business logic → requeue), update status via the status subresource, and record events. Error handling uses categorized errors (API Server, User, Expected, Unexpected) for retry semantics. See existing controllers in `pkg/controllers/` for reference. + +### API Interface Pattern + +Resources implement `Conditioned` (for status conditions) and `ConditionedObj` (combining `client.Object` + `Conditioned`). See `apis/interface.go`. 
+ +## Testing Conventions + +- **Unit tests**: `_test.go` in the same directory; table-driven style +- **Integration tests**: `_integration_test.go`; use Ginkgo/Gomega with `envtest` +- **E2E tests**: `test/e2e/`; Ginkgo/Gomega against Kind clusters +- Do **not** use assert libraries; use `cmp.Diff` / `cmp.Equal` from `google/go-cmp` for comparisons +- Use `want` / `wanted` (not `expect` / `expected`) for desired state variables +- Test output format: `"FuncName(%v) = %v, want %v"` +- Compare structs in one shot with `cmp.Diff`, not field-by-field +- Mock external dependencies with `gomock` +- When adding Ginkgo tests, add to a new `Context`; reuse existing setup + +## Collaboration Protocol + +### Domain Knowledge -### Plan Structure Guidelines -- When creating a plan, organize it into numbered phases (e.g., "Phase 1: Setup Dependencies"). -- Break down each phase into specific tasks with numeric identifiers (e.g., "Task 1.1: Add Dependencies"). -- Include a detailed checklist at the end of the document that maps to all phases and tasks. -- Plan should always lookup related information from `domain_knowledge` folder and any applicable specification files. -- Mark tasks as `- [ ]` for pending tasks and `- [x]` for completed tasks. -- Start all planning tasks as unchecked, and update them to checked as implementation proceeds. -- Each planning task should have clear success criteria. -- End the plan with success criteria that define when the implementation is complete. -- Plans should start with writing Unit Tests first when possible, so we can use those to guide our implementation. Same for UI tests when it makes sense. -- If the domain knowledge has changed, update the related files in the `domain_knowledge` folder. -- If specifications have changed, update the related files in the `specifications` folder. - -### Following Plans -- When coding you need to follow the plan phases and check off the tasks as they are completed. 
-- As you complete a task, update the plan and mark that task complete before you begin the next task. -- Tasks that involved tests should not be marked complete until the tests pass. - -### Example Usage - -**User**: I need to implement JWT authentication for our Express API. +Refer to `.github/.copilot/domain_knowledge/` for entity relationships, workflows, and ubiquitous language. Update these files as understanding grows. -**Agent**: I'll help with implementing JWT authentication for your Express API. Let me create a breadcrumb file to track our implementation. What would you like to name this breadcrumb? +### Specifications -**User**: Use "jwt-auth-implementation" +Use `.github/.copilot/specifications/` for feature specs. Ask which specifications apply if unclear. -**Agent**: I've created the breadcrumb file `2025-04-08-1430-jwt-auth-implementation.md`. Are you happy with this implementation plan? - -[high level plan with tasks here] - -**User**: Looks good, but let's use 30-minute expiration for tokens. - -**Agent**: I've updated the breadcrumb with your specification: Are you happy with this implementation plan? - -[high level plan with tasks here] - -**User**: Yes, please proceed. - -**Agent**: I'll implement the JWT authentication now. Let me start by installing the necessary packages and creating the authentication middleware. I've updated the breadcrumb with implementation details. Is there anything specific you'd like me to modify or explain about the implementation? - -**User**: This looks good. +### Breadcrumb Protocol -**Agent**: I've updated the breadcrumb with the latest understanding. +For non-trivial tasks, create a breadcrumb file at `.github/.copilot/breadcrumbs/yyyy-mm-dd-HHMM-{title}.md` to track decisions and progress. Update it before and after code changes, and get plan approval before implementation. See existing breadcrumbs for format examples. 
diff --git a/apis/placement/v1/stageupdate_types.go b/apis/placement/v1/stageupdate_types.go new file mode 100644 index 000000000..afcc88f96 --- /dev/null +++ b/apis/placement/v1/stageupdate_types.go @@ -0,0 +1,674 @@ +/* +Copyright 2025 The KubeFleet Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +genclient:Cluster +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={fleet,fleet-placement},shortName=csur +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:printcolumn:JSONPath=`.spec.placementName`,name="Placement",type=string +// +kubebuilder:printcolumn:JSONPath=`.spec.resourceSnapshotIndex`,name="Resource-Snapshot-Index",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.policySnapshotIndexUsed`,name="Policy-Snapshot-Index",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Initialized")].status`,name="Initialized",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Progressing")].status`,name="Progressing",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Succeeded")].status`,name="Succeeded",type=string +// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date +// 
+kubebuilder:printcolumn:JSONPath=`.spec.stagedRolloutStrategyName`,name="Strategy",priority=1,type=string +// +kubebuilder:validation:XValidation:rule="size(self.metadata.name) < 64",message="metadata.name max length is 63" + +// ClusterStagedUpdateRun represents a stage by stage update process that applies ClusterResourcePlacement +// selected resources to specified clusters. +// Resources from unselected clusters are removed after all stages in the update strategy are completed. +// Each ClusterStagedUpdateRun object corresponds to a single release of a specific resource version. +// The release is abandoned if the ClusterStagedUpdateRun object is deleted or the scheduling decision changes. +// The name of the ClusterStagedUpdateRun must conform to RFC 1123. +type ClusterStagedUpdateRun struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of ClusterStagedUpdateRun. + // +kubebuilder:validation:Required + Spec UpdateRunSpec `json:"spec"` + + // The observed status of ClusterStagedUpdateRun. + // +kubebuilder:validation:Optional + Status UpdateRunStatus `json:"status,omitempty"` +} + +// State represents the desired state of an update run. +// +enum +type State string + +const ( + // StateInitialize describes user intent to initialize but not run the update run. + // This is the default state when an update run is created. + // Users can subsequently set the state to Run. + StateInitialize State = "Initialize" + + // StateRun describes user intent to execute (or resume execution if stopped). + // Users can subsequently set the state to Stop. + StateRun State = "Run" + + // StateStop describes user intent to stop the update run. + // Users can subsequently set the state to Run. + StateStop State = "Stop" +) + +// UpdateRunSpec defines the desired rollout strategy and the snapshot indices of the resources to be updated. 
+// It specifies a stage-by-stage update process across selected clusters for the given ResourcePlacement object. +// +kubebuilder:validation:XValidation:rule="!(has(oldSelf.state) && oldSelf.state == 'Initialize' && self.state == 'Stop')",message="invalid state transition: cannot transition from Initialize to Stop" +// +kubebuilder:validation:XValidation:rule="!(has(oldSelf.state) && oldSelf.state == 'Run' && self.state == 'Initialize')",message="invalid state transition: cannot transition from Run to Initialize" +// +kubebuilder:validation:XValidation:rule="!(has(oldSelf.state) && oldSelf.state == 'Stop' && self.state == 'Initialize')",message="invalid state transition: cannot transition from Stop to Initialize" +type UpdateRunSpec struct { + // PlacementName is the name of placement that this update run is applied to. + // There can be multiple active update runs for each placement, but + // it's up to the DevOps team to ensure they don't conflict with each other. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=255 + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="placementName is immutable" + PlacementName string `json:"placementName"` + + // The resource snapshot index of the selected resources to be updated across clusters. + // The index represents a group of resource snapshots that includes all the resources a ResourcePlacement selected. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="resourceSnapshotIndex is immutable" + // +kubebuilder:validation:Optional + ResourceSnapshotIndex string `json:"resourceSnapshotIndex"` + + // The name of the update strategy that specifies the stages and the sequence + // in which the selected resources will be updated on the member clusters. The stages + // are computed according to the referenced strategy when the update run starts + // and recorded in the status field. 
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="stagedRolloutStrategyName is immutable" + StagedUpdateStrategyName string `json:"stagedRolloutStrategyName"` + + // State indicates the desired state of the update run. + // Initialize: The update run should be initialized but execution should not start (default). + // Run: The update run should execute or resume execution. + // Stop: The update run should stop execution. + // +kubebuilder:validation:Optional + // +kubebuilder:default=Initialize + // +kubebuilder:validation:Enum=Initialize;Run;Stop + State State `json:"state,omitempty"` +} + +// +genclient +// +genclient:cluster +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={fleet,fleet-placement},shortName=csus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterStagedUpdateStrategy defines a reusable strategy that specifies the stages and the sequence +// in which the selected cluster resources will be updated on the member clusters. +type ClusterStagedUpdateStrategy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of ClusterStagedUpdateStrategy. + // +kubebuilder:validation:Required + Spec UpdateStrategySpec `json:"spec"` +} + +// UpdateStrategySpec defines the desired state of the StagedUpdateStrategy. +type UpdateStrategySpec struct { + // Stage specifies the configuration for each update stage. + // +kubebuilder:validation:MaxItems=31 + // +kubebuilder:validation:Required + Stages []StageConfig `json:"stages"` +} + +// ClusterStagedUpdateStrategyList contains a list of StagedUpdateStrategy. 
+// +kubebuilder:resource:scope=Cluster +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterStagedUpdateStrategyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterStagedUpdateStrategy `json:"items"` +} + +// StageConfig describes a single update stage. +// The clusters in each stage are updated sequentially. +// The update stops if any of the updates fail. +type StageConfig struct { + // The name of the stage. This MUST be unique within the same StagedUpdateStrategy. + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Pattern="^[a-z0-9]+$" + // +kubebuilder:validation:Required + Name string `json:"name"` + + // LabelSelector is a label query over all the joined member clusters. Clusters matching the query are selected + // for this stage. There cannot be overlapping clusters between stages when the stagedUpdateRun is created. + // If the label selector is empty, the stage includes all the selected clusters. + // If the label selector is nil, the stage does not include any selected clusters. + // +kubebuilder:validation:Optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + + // The label key used to sort the selected clusters. + // The clusters within the stage are updated sequentially following the rule below: + // - primary: Ascending order based on the value of the label key, interpreted as integers if present. + // - secondary: Ascending order based on the name of the cluster if the label key is absent or the label value is the same. + // +kubebuilder:validation:Optional + SortingLabelKey *string `json:"sortingLabelKey,omitempty"` + + // MaxConcurrency specifies the maximum number of clusters that can be updated concurrently within this stage. + // Value can be an absolute number (ex: 5) or a percentage of the total clusters in the stage (ex: 50%). + // Fractional results are rounded down. A minimum of 1 update is enforced. 
+	// If not specified, all clusters in the stage are updated sequentially (effectively maxConcurrency = 1).
+	// Defaults to 1.
+	// +kubebuilder:default=1
+	// +kubebuilder:validation:XIntOrString
+	// +kubebuilder:validation:Pattern="^(100|[1-9][0-9]?)%$"
+	// +kubebuilder:validation:XValidation:rule="self == null || type(self) != int || self >= 1",message="maxConcurrency must be at least 1"
+	// +kubebuilder:validation:Optional
+	MaxConcurrency *intstr.IntOrString `json:"maxConcurrency,omitempty"`
+
+	// The collection of tasks that each stage needs to complete successfully before moving to the next stage.
+	// Each task is executed in parallel and there cannot be more than one task of the same type.
+	// +kubebuilder:validation:MaxItems=2
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:validation:XValidation:rule="!self.exists(e, e.type == 'Approval' && has(e.waitTime))",message="AfterStageTaskType is Approval, waitTime is not allowed"
+	// +kubebuilder:validation:XValidation:rule="!self.exists(e, e.type == 'TimedWait' && !has(e.waitTime))",message="AfterStageTaskType is TimedWait, waitTime is required"
+	AfterStageTasks []StageTask `json:"afterStageTasks,omitempty"`
+
+	// The collection of tasks that need to be completed successfully by each stage before starting the stage.
+	// Each task is executed in parallel and there cannot be more than one task of the same type.
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:validation:MaxItems=1
+	// +kubebuilder:validation:XValidation:rule="!self.exists(e, e.type == 'Approval' && has(e.waitTime))",message="AfterStageTaskType is Approval, waitTime is not allowed"
+	// +kubebuilder:validation:XValidation:rule="!self.exists(e, e.type == 'TimedWait')",message="BeforeStageTaskType cannot be TimedWait"
+	BeforeStageTasks []StageTask `json:"beforeStageTasks,omitempty"`
+}
+
+// StageTask is the pre or post stage task that needs to be completed before starting or moving to the next stage.
+type StageTask struct { + // The type of the before or after stage task. + // +kubebuilder:validation:Enum=TimedWait;Approval + // +kubebuilder:validation:Required + Type StageTaskType `json:"type"` + + // The time to wait after all the clusters in the current stage complete the update before moving to the next stage. + // +kubebuilder:validation:Pattern="^0|([0-9]+(\\.[0-9]+)?(s|m|h))+$" + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Optional + WaitTime *metav1.Duration `json:"waitTime,omitempty"` +} + +// UpdateRunStatus defines the observed state of the ClusterStagedUpdateRun. +type UpdateRunStatus struct { + // PolicySnapShotIndexUsed records the policy snapshot index of the ClusterResourcePlacement (CRP) that + // the update run is based on. The index represents the latest policy snapshot at the start of the update run. + // If a newer policy snapshot is detected after the run starts, the staged update run is abandoned. + // The scheduler must identify all clusters that meet the current policy before the update run begins. + // All clusters involved in the update run are selected from the list of clusters scheduled by the CRP according + // to the current policy. + // +kubebuilder:validation:Optional + PolicySnapshotIndexUsed string `json:"policySnapshotIndexUsed,omitempty"` + + // PolicyObservedClusterCount records the number of observed clusters in the policy snapshot. + // It is recorded at the beginning of the update run from the policy snapshot object. + // If the `ObservedClusterCount` value is updated during the update run, the update run is abandoned. + // +kubebuilder:validation:Optional + PolicyObservedClusterCount int `json:"policyObservedClusterCount,omitempty"` + + // ResourceSnapshotIndexUsed records the resource snapshot index that the update run is based on. + // The index represents the same resource snapshots as specified in the spec field, or the latest. 
+	// +kubebuilder:validation:Optional
+	ResourceSnapshotIndexUsed string `json:"resourceSnapshotIndexUsed,omitempty"`
+
+	// ApplyStrategy is the apply strategy that the stagedUpdateRun is using.
+	// It is the same as the apply strategy in the CRP when the staged update run starts.
+	// The apply strategy is not updated during the update run even if it changes in the CRP.
+	// +kubebuilder:validation:Optional
+	ApplyStrategy *ApplyStrategy `json:"appliedStrategy,omitempty"`
+
+	// UpdateStrategySnapshot is the snapshot of the UpdateStrategy used for the update run.
+	// The snapshot is immutable during the update run.
+	// The strategy is applied to the list of clusters scheduled by the CRP according to the current policy.
+	// The update run fails to initialize if the strategy fails to produce a valid list of stages where each selected
+	// cluster is included in exactly one stage.
+	// +kubebuilder:validation:Optional
+	UpdateStrategySnapshot *UpdateStrategySpec `json:"stagedUpdateStrategySnapshot,omitempty"`
+
+	// StagesStatus lists the current updating status of each stage.
+	// The list is empty if the update run is not started or failed to initialize.
+	// +kubebuilder:validation:Optional
+	StagesStatus []StageUpdatingStatus `json:"stagesStatus,omitempty"`
+
+	// DeletionStageStatus lists the current status of the deletion stage. The deletion stage
+	// removes all the resources from the clusters that are not selected by the
+	// current policy after all the update stages are completed.
+	// +kubebuilder:validation:Optional
+	DeletionStageStatus *StageUpdatingStatus `json:"deletionStageStatus,omitempty"`
+
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	//
+	// Conditions is an array of current observed conditions for StagedUpdateRun.
+	// Known conditions are "Initialized", "Progressing", "Succeeded".
+ // +kubebuilder:validation:Optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// StagedUpdateRunConditionType identifies a specific condition of the StagedUpdateRun. +// +enum +type StagedUpdateRunConditionType string + +const ( + // StagedUpdateRunConditionInitialized indicates whether the staged update run is initialized, meaning it + // has computed all the stages according to the referenced strategy and is ready to start the update. + // Its condition status can be one of the following: + // - "True": The staged update run is initialized successfully. + // - "False": The staged update run encountered an error during initialization and aborted. + StagedUpdateRunConditionInitialized StagedUpdateRunConditionType = "Initialized" + + // StagedUpdateRunConditionProgressing indicates whether the staged update run is making progress. + // Its condition status can be one of the following: + // - "True": The staged update run is making progress. + // - "False": The staged update run is waiting/paused/abandoned. + // - "Unknown": The staged update run is in a transitioning state. + StagedUpdateRunConditionProgressing StagedUpdateRunConditionType = "Progressing" + + // StagedUpdateRunConditionSucceeded indicates whether the staged update run is completed successfully. + // Its condition status can be one of the following: + // - "True": The staged update run is completed successfully. + // - "False": The staged update run encountered an error and stopped. + StagedUpdateRunConditionSucceeded StagedUpdateRunConditionType = "Succeeded" +) + +// StageUpdatingStatus defines the status of the update run in a stage. +type StageUpdatingStatus struct { + // The name of the stage. + // +kubebuilder:validation:Required + StageName string `json:"stageName"` + + // The list of each cluster's updating status in this stage. 
+ // +kubebuilder:validation:Required + Clusters []ClusterUpdatingStatus `json:"clusters"` + + // The status of the post-update tasks associated with the current stage. + // Empty if the stage has not finished updating all the clusters. + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:Optional + AfterStageTaskStatus []StageTaskStatus `json:"afterStageTaskStatus,omitempty"` + + // The status of the pre-update tasks associated with the current stage. + // +kubebuilder:validation:MaxItems=1 + // +kubebuilder:validation:Optional + BeforeStageTaskStatus []StageTaskStatus `json:"beforeStageTaskStatus,omitempty"` + + // The time when the update started on the stage. Empty if the stage has not started updating. + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=date-time + StartTime *metav1.Time `json:"startTime,omitempty"` + + // The time when the update finished on the stage. Empty if the stage has not started updating. + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Format=date-time + EndTime *metav1.Time `json:"endTime,omitempty"` + + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // + // Conditions is an array of current observed updating conditions for the stage. Empty if the stage has not started updating. + // Known conditions are "Progressing", "Succeeded". + // +kubebuilder:validation:Optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// StageUpdatingConditionType identifies a specific condition of the stage that is being updated. +// +enum +type StageUpdatingConditionType string + +const ( + // StageUpdatingConditionProgressing indicates whether the stage updating is making progress. + // Its condition status can be one of the following: + // - "True": The stage updating is making progress. + // - "False": The stage updating is waiting. 
+ // - "Unknown": The staged updating is a transitioning state. + StageUpdatingConditionProgressing StageUpdatingConditionType = "Progressing" + + // StageUpdatingConditionSucceeded indicates whether the stage updating is completed successfully. + // Its condition status can be one of the following: + // - "True": The stage updating is completed successfully. + // - "False": The stage updating encountered an error and stopped. + StageUpdatingConditionSucceeded StageUpdatingConditionType = "Succeeded" +) + +// ClusterUpdatingStatus defines the status of the update run on a cluster. +type ClusterUpdatingStatus struct { + // The name of the cluster. + // +kubebuilder:validation:Required + ClusterName string `json:"clusterName"` + + // ResourceOverrideSnapshots is a list of ResourceOverride snapshots associated with the cluster. + // The list is computed at the beginning of the update run and not updated during the update run. + // The list is empty if there are no resource overrides associated with the cluster. + // +kubebuilder:validation:Optional + ResourceOverrideSnapshots []NamespacedName `json:"resourceOverrideSnapshots,omitempty"` + + // ClusterResourceOverrides contains a list of applicable ClusterResourceOverride snapshot names + // associated with the cluster. + // The list is computed at the beginning of the update run and not updated during the update run. + // The list is empty if there are no cluster overrides associated with the cluster. + // +kubebuilder:validation:Optional + ClusterResourceOverrideSnapshots []string `json:"clusterResourceOverrideSnapshots,omitempty"` + + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // + // Conditions is an array of current observed conditions for clusters. Empty if the cluster has not started updating. + // Known conditions are "Started", "Succeeded". 
+ // +kubebuilder:validation:Optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// ClusterUpdatingStatusConditionType identifies a specific condition of the UpdatingStatus of the cluster. +// +enum +type ClusterUpdatingStatusConditionType string + +const ( + // ClusterUpdatingConditionStarted indicates whether the cluster updating has started. + // Its condition status can be one of the following: + // - "True": The cluster updating has started. + ClusterUpdatingConditionStarted ClusterUpdatingStatusConditionType = "Started" + + // ClusterUpdatingConditionSucceeded indicates whether the cluster updating is completed successfully. + // Its condition status can be one of the following: + // - "True": The cluster updating is completed successfully. + // - "False": The cluster updating encountered an error and stopped. + ClusterUpdatingConditionSucceeded ClusterUpdatingStatusConditionType = "Succeeded" +) + +type StageTaskStatus struct { + // The type of the pre or post update task. + // +kubebuilder:validation:Enum=TimedWait;Approval + // +kubebuilder:validation:Required + Type StageTaskType `json:"type"` + + // The name of the approval request object that is created for this stage. + // Only valid if the AfterStageTaskType is Approval. + // +kubebuilder:validation:Optional + ApprovalRequestName string `json:"approvalRequestName,omitempty"` + + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // + // Conditions is an array of current observed conditions for the specific type of pre or post update task. + // Known conditions are "ApprovalRequestCreated", "WaitTimeElapsed", and "ApprovalRequestApproved". + // +kubebuilder:validation:Optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// StageTaskType identifies a specific type of the AfterStageTask or BeforeStageTask. 
+// +enum +type StageTaskType string + +const ( + // StageTaskTypeTimedWait indicates the stage task is a timed wait. + StageTaskTypeTimedWait StageTaskType = "TimedWait" + + // StageTaskTypeApproval indicates the stage task is an approval. + StageTaskTypeApproval StageTaskType = "Approval" +) + +// StageTaskConditionType identifies a specific condition of the AfterStageTask or BeforeStageTask. +// +enum +type StageTaskConditionType string + +const ( + // StageTaskConditionApprovalRequestCreated indicates if the approval request has been created. + // Its condition status can be: + // - "True": The approval request has been created. + StageTaskConditionApprovalRequestCreated StageTaskConditionType = "ApprovalRequestCreated" + + // StageTaskConditionApprovalRequestApproved indicates if the approval request has been approved. + // Its condition status can be: + // - "True": The approval request has been approved. + StageTaskConditionApprovalRequestApproved StageTaskConditionType = "ApprovalRequestApproved" + + // StageTaskConditionWaitTimeElapsed indicates if the wait time after each stage has elapsed. + // If the status is "False", the condition message will include the remaining wait time. + // Its condition status can be: + // - "True": The wait time has elapsed. + // - "False": The wait time has not elapsed. + StageTaskConditionWaitTimeElapsed StageTaskConditionType = "WaitTimeElapsed" +) + +// ClusterStagedUpdateRunList contains a list of ClusterStagedUpdateRun. 
+// +kubebuilder:resource:scope=Cluster +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterStagedUpdateRunList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterStagedUpdateRun `json:"items"` +} + +// +genclient +// +genclient:Cluster +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,categories={fleet,fleet-placement},shortName=careq +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:printcolumn:JSONPath=`.spec.parentStageRollout`,name="Update-Run",type=string +// +kubebuilder:printcolumn:JSONPath=`.spec.targetStage`,name="Stage",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Approved")].status`,name="Approved",type=string +// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date + +// ClusterApprovalRequest defines a request for user approval for cluster staged update run. +// The request object MUST have the following labels: +// - `TargetUpdateRun`: Points to the cluster staged update run that this approval request is for. +// - `TargetStage`: The name of the stage that this approval request is for. +// - `IsLatestUpdateRunApproval`: Indicates whether this approval request is the latest one related to this update run. +// - `TaskType`: Indicates whether this approval request is for the before or after stage task. +type ClusterApprovalRequest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of ClusterApprovalRequest. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="The spec field is immutable" + // +kubebuilder:validation:Required + Spec ApprovalRequestSpec `json:"spec"` + + // The observed state of ClusterApprovalRequest. 
+	// +kubebuilder:validation:Optional
+	Status ApprovalRequestStatus `json:"status,omitempty"`
+}
+
+// ApprovalRequestSpec defines the desired state of the update run approval request.
+// The entire spec is immutable.
+type ApprovalRequestSpec struct {
+	// The name of the staged update run that this approval request is for.
+	// +kubebuilder:validation:Required
+	TargetUpdateRun string `json:"parentStageRollout"`
+
+	// The name of the update stage that this approval request is for.
+	// +kubebuilder:validation:Required
+	TargetStage string `json:"targetStage"`
+}
+
+// ApprovalRequestStatus defines the observed state of the ClusterApprovalRequest.
+type ApprovalRequestStatus struct {
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	//
+	// Conditions is an array of current observed conditions for the specific type of post-update task.
+	// Known conditions are "Approved" and "ApprovalAccepted".
+	// +kubebuilder:validation:Optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// ApprovalRequestConditionType identifies a specific condition of the ClusterApprovalRequest.
+type ApprovalRequestConditionType string
+
+const (
+	// ApprovalRequestConditionApproved indicates if the approval request was approved.
+	// Its condition status can be:
+	// - "True": The request is approved.
+	ApprovalRequestConditionApproved ApprovalRequestConditionType = "Approved"
+
+	// ApprovalRequestConditionApprovalAccepted indicates if the approved approval request was accepted.
+	// Its condition status can be:
+	// - "True": The approved request is accepted.
+	ApprovalRequestConditionApprovalAccepted ApprovalRequestConditionType = "ApprovalAccepted"
+)
+
+// ClusterApprovalRequestList contains a list of ClusterApprovalRequest.
+// +kubebuilder:resource:scope=Cluster +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterApprovalRequestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ClusterApprovalRequest `json:"items"` +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced,categories={fleet,fleet-placement},shortName=sur +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:printcolumn:JSONPath=`.spec.placementName`,name="Placement",type=string +// +kubebuilder:printcolumn:JSONPath=`.spec.resourceSnapshotIndex`,name="Resource-Snapshot-Index",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.policySnapshotIndexUsed`,name="Policy-Snapshot-Index",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Initialized")].status`,name="Initialized",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Progressing")].status`,name="Progressing",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Succeeded")].status`,name="Succeeded",type=string +// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date +// +kubebuilder:printcolumn:JSONPath=`.spec.stagedRolloutStrategyName`,name="Strategy",priority=1,type=string +// +kubebuilder:validation:XValidation:rule="size(self.metadata.name) < 64",message="metadata.name max length is 63" + +// StagedUpdateRun represents a stage by stage update process that applies ResourcePlacement +// selected resources to specified clusters. +// Resources from unselected clusters are removed after all stages in the update strategy are completed. +// Each StagedUpdateRun object corresponds to a single release of a specific resource version. +// The release is abandoned if the StagedUpdateRun object is deleted or the scheduling decision changes. 
+// The name of the StagedUpdateRun must conform to RFC 1123. +type StagedUpdateRun struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of StagedUpdateRun. + // +kubebuilder:validation:Required + Spec UpdateRunSpec `json:"spec"` + + // The observed status of StagedUpdateRun. + // +kubebuilder:validation:Optional + Status UpdateRunStatus `json:"status,omitempty"` +} + +// StagedUpdateRunList contains a list of StagedUpdateRun. +// +kubebuilder:resource:scope=Namespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type StagedUpdateRunList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StagedUpdateRun `json:"items"` +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Namespaced,categories={fleet,fleet-placement},shortName=sus +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StagedUpdateStrategy defines a reusable strategy that specifies the stages and the sequence +// in which the selected cluster resources will be updated on the member clusters. +type StagedUpdateStrategy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of StagedUpdateStrategy. + // +kubebuilder:validation:Required + Spec UpdateStrategySpec `json:"spec"` +} + +// StagedUpdateStrategyList contains a list of StagedUpdateStrategy. 
+// +kubebuilder:resource:scope=Namespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type StagedUpdateStrategyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []StagedUpdateStrategy `json:"items"` +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced,categories={fleet,fleet-placement},shortName=areq +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:printcolumn:JSONPath=`.spec.parentStageRollout`,name="Update-Run",type=string +// +kubebuilder:printcolumn:JSONPath=`.spec.targetStage`,name="Stage",type=string +// +kubebuilder:printcolumn:JSONPath=`.status.conditions[?(@.type=="Approved")].status`,name="Approved",type=string +// +kubebuilder:printcolumn:JSONPath=`.metadata.creationTimestamp`,name="Age",type=date + +// ApprovalRequest defines a request for user approval for staged update run. +// The request object MUST have the following labels: +// - `TargetUpdateRun`: Points to the staged update run that this approval request is for. +// - `TargetStage`: The name of the stage that this approval request is for. +// - `IsLatestUpdateRunApproval`: Indicates whether this approval request is the latest one related to this update run. +// - `TaskType`: Indicates whether this approval request is for the before or after stage task. +type ApprovalRequest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The desired state of ApprovalRequest. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="The spec field is immutable" + // +kubebuilder:validation:Required + Spec ApprovalRequestSpec `json:"spec"` + + // The observed state of ApprovalRequest. + // +kubebuilder:validation:Optional + Status ApprovalRequestStatus `json:"status,omitempty"` +} + +// ApprovalRequestList contains a list of ApprovalRequest. 
+// +kubebuilder:resource:scope=Namespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ApprovalRequestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ApprovalRequest `json:"items"` +} + +func init() { + SchemeBuilder.Register( + &ClusterStagedUpdateRun{}, &ClusterStagedUpdateRunList{}, + &ClusterStagedUpdateStrategy{}, &ClusterStagedUpdateStrategyList{}, + &ClusterApprovalRequest{}, &ClusterApprovalRequestList{}, + &StagedUpdateRun{}, &StagedUpdateRunList{}, + &StagedUpdateStrategy{}, &StagedUpdateStrategyList{}, + &ApprovalRequest{}, &ApprovalRequestList{}, + ) +} diff --git a/apis/placement/v1/zz_generated.deepcopy.go b/apis/placement/v1/zz_generated.deepcopy.go index fc248dbd2..6749dbaaa 100644 --- a/apis/placement/v1/zz_generated.deepcopy.go +++ b/apis/placement/v1/zz_generated.deepcopy.go @@ -176,6 +176,102 @@ func (in *ApplyStrategy) DeepCopy() *ApplyStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApprovalRequest) DeepCopyInto(out *ApprovalRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApprovalRequest. +func (in *ApprovalRequest) DeepCopy() *ApprovalRequest { + if in == nil { + return nil + } + out := new(ApprovalRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ApprovalRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ApprovalRequestList) DeepCopyInto(out *ApprovalRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ApprovalRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApprovalRequestList. +func (in *ApprovalRequestList) DeepCopy() *ApprovalRequestList { + if in == nil { + return nil + } + out := new(ApprovalRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ApprovalRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApprovalRequestSpec) DeepCopyInto(out *ApprovalRequestSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApprovalRequestSpec. +func (in *ApprovalRequestSpec) DeepCopy() *ApprovalRequestSpec { + if in == nil { + return nil + } + out := new(ApprovalRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApprovalRequestStatus) DeepCopyInto(out *ApprovalRequestStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApprovalRequestStatus. 
+func (in *ApprovalRequestStatus) DeepCopy() *ApprovalRequestStatus { + if in == nil { + return nil + } + out := new(ApprovalRequestStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterAffinity) DeepCopyInto(out *ClusterAffinity) { *out = *in @@ -203,6 +299,65 @@ func (in *ClusterAffinity) DeepCopy() *ClusterAffinity { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterApprovalRequest) DeepCopyInto(out *ClusterApprovalRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterApprovalRequest. +func (in *ClusterApprovalRequest) DeepCopy() *ClusterApprovalRequest { + if in == nil { + return nil + } + out := new(ClusterApprovalRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterApprovalRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterApprovalRequestList) DeepCopyInto(out *ClusterApprovalRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterApprovalRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterApprovalRequestList. 
+func (in *ClusterApprovalRequestList) DeepCopy() *ClusterApprovalRequestList { + if in == nil { + return nil + } + out := new(ClusterApprovalRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterApprovalRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterDecision) DeepCopyInto(out *ClusterDecision) { *out = *in @@ -794,6 +949,155 @@ func (in *ClusterSelectorTerm) DeepCopy() *ClusterSelectorTerm { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStagedUpdateRun) DeepCopyInto(out *ClusterStagedUpdateRun) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStagedUpdateRun. +func (in *ClusterStagedUpdateRun) DeepCopy() *ClusterStagedUpdateRun { + if in == nil { + return nil + } + out := new(ClusterStagedUpdateRun) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterStagedUpdateRun) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterStagedUpdateRunList) DeepCopyInto(out *ClusterStagedUpdateRunList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterStagedUpdateRun, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStagedUpdateRunList. +func (in *ClusterStagedUpdateRunList) DeepCopy() *ClusterStagedUpdateRunList { + if in == nil { + return nil + } + out := new(ClusterStagedUpdateRunList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterStagedUpdateRunList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterStagedUpdateStrategy) DeepCopyInto(out *ClusterStagedUpdateStrategy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStagedUpdateStrategy. +func (in *ClusterStagedUpdateStrategy) DeepCopy() *ClusterStagedUpdateStrategy { + if in == nil { + return nil + } + out := new(ClusterStagedUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterStagedUpdateStrategy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterStagedUpdateStrategyList) DeepCopyInto(out *ClusterStagedUpdateStrategyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterStagedUpdateStrategy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStagedUpdateStrategyList. +func (in *ClusterStagedUpdateStrategyList) DeepCopy() *ClusterStagedUpdateStrategyList { + if in == nil { + return nil + } + out := new(ClusterStagedUpdateStrategyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterStagedUpdateStrategyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterUpdatingStatus) DeepCopyInto(out *ClusterUpdatingStatus) { + *out = *in + if in.ResourceOverrideSnapshots != nil { + in, out := &in.ResourceOverrideSnapshots, &out.ResourceOverrideSnapshots + *out = make([]NamespacedName, len(*in)) + copy(*out, *in) + } + if in.ClusterResourceOverrideSnapshots != nil { + in, out := &in.ClusterResourceOverrideSnapshots, &out.ClusterResourceOverrideSnapshots + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterUpdatingStatus. 
+func (in *ClusterUpdatingStatus) DeepCopy() *ClusterUpdatingStatus { + if in == nil { + return nil + } + out := new(ClusterUpdatingStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DiffDetails) DeepCopyInto(out *DiffDetails) { *out = *in @@ -1713,6 +2017,260 @@ func (in *ServerSideApplyConfig) DeepCopy() *ServerSideApplyConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageConfig) DeepCopyInto(out *StageConfig) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SortingLabelKey != nil { + in, out := &in.SortingLabelKey, &out.SortingLabelKey + *out = new(string) + **out = **in + } + if in.MaxConcurrency != nil { + in, out := &in.MaxConcurrency, &out.MaxConcurrency + *out = new(intstr.IntOrString) + **out = **in + } + if in.AfterStageTasks != nil { + in, out := &in.AfterStageTasks, &out.AfterStageTasks + *out = make([]StageTask, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BeforeStageTasks != nil { + in, out := &in.BeforeStageTasks, &out.BeforeStageTasks + *out = make([]StageTask, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageConfig. +func (in *StageConfig) DeepCopy() *StageConfig { + if in == nil { + return nil + } + out := new(StageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StageTask) DeepCopyInto(out *StageTask) { + *out = *in + if in.WaitTime != nil { + in, out := &in.WaitTime, &out.WaitTime + *out = new(metav1.Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageTask. +func (in *StageTask) DeepCopy() *StageTask { + if in == nil { + return nil + } + out := new(StageTask) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageTaskStatus) DeepCopyInto(out *StageTaskStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageTaskStatus. +func (in *StageTaskStatus) DeepCopy() *StageTaskStatus { + if in == nil { + return nil + } + out := new(StageTaskStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StageUpdatingStatus) DeepCopyInto(out *StageUpdatingStatus) { + *out = *in + if in.Clusters != nil { + in, out := &in.Clusters, &out.Clusters + *out = make([]ClusterUpdatingStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AfterStageTaskStatus != nil { + in, out := &in.AfterStageTaskStatus, &out.AfterStageTaskStatus + *out = make([]StageTaskStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BeforeStageTaskStatus != nil { + in, out := &in.BeforeStageTaskStatus, &out.BeforeStageTaskStatus + *out = make([]StageTaskStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.EndTime != nil { + in, out := &in.EndTime, &out.EndTime + *out = (*in).DeepCopy() + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageUpdatingStatus. +func (in *StageUpdatingStatus) DeepCopy() *StageUpdatingStatus { + if in == nil { + return nil + } + out := new(StageUpdatingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StagedUpdateRun) DeepCopyInto(out *StagedUpdateRun) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StagedUpdateRun. 
+func (in *StagedUpdateRun) DeepCopy() *StagedUpdateRun { + if in == nil { + return nil + } + out := new(StagedUpdateRun) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StagedUpdateRun) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StagedUpdateRunList) DeepCopyInto(out *StagedUpdateRunList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StagedUpdateRun, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StagedUpdateRunList. +func (in *StagedUpdateRunList) DeepCopy() *StagedUpdateRunList { + if in == nil { + return nil + } + out := new(StagedUpdateRunList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StagedUpdateRunList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StagedUpdateStrategy) DeepCopyInto(out *StagedUpdateStrategy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StagedUpdateStrategy. 
+func (in *StagedUpdateStrategy) DeepCopy() *StagedUpdateStrategy { + if in == nil { + return nil + } + out := new(StagedUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StagedUpdateStrategy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StagedUpdateStrategyList) DeepCopyInto(out *StagedUpdateStrategyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StagedUpdateStrategy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StagedUpdateStrategyList. +func (in *StagedUpdateStrategyList) DeepCopy() *StagedUpdateStrategyList { + if in == nil { + return nil + } + out := new(StagedUpdateStrategyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StagedUpdateStrategyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Toleration) DeepCopyInto(out *Toleration) { *out = *in @@ -1748,6 +2306,87 @@ func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpdateRunSpec) DeepCopyInto(out *UpdateRunSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateRunSpec. +func (in *UpdateRunSpec) DeepCopy() *UpdateRunSpec { + if in == nil { + return nil + } + out := new(UpdateRunSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdateRunStatus) DeepCopyInto(out *UpdateRunStatus) { + *out = *in + if in.ApplyStrategy != nil { + in, out := &in.ApplyStrategy, &out.ApplyStrategy + *out = new(ApplyStrategy) + (*in).DeepCopyInto(*out) + } + if in.UpdateStrategySnapshot != nil { + in, out := &in.UpdateStrategySnapshot, &out.UpdateStrategySnapshot + *out = new(UpdateStrategySpec) + (*in).DeepCopyInto(*out) + } + if in.StagesStatus != nil { + in, out := &in.StagesStatus, &out.StagesStatus + *out = make([]StageUpdatingStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeletionStageStatus != nil { + in, out := &in.DeletionStageStatus, &out.DeletionStageStatus + *out = new(StageUpdatingStatus) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateRunStatus. +func (in *UpdateRunStatus) DeepCopy() *UpdateRunStatus { + if in == nil { + return nil + } + out := new(UpdateRunStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UpdateStrategySpec) DeepCopyInto(out *UpdateStrategySpec) { + *out = *in + if in.Stages != nil { + in, out := &in.Stages, &out.Stages + *out = make([]StageConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStrategySpec. +func (in *UpdateStrategySpec) DeepCopy() *UpdateStrategySpec { + if in == nil { + return nil + } + out := new(UpdateStrategySpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Work) DeepCopyInto(out *Work) { *out = *in diff --git a/config/crd/bases/placement.kubernetes-fleet.io_approvalrequests.yaml b/config/crd/bases/placement.kubernetes-fleet.io_approvalrequests.yaml index b16f6ea71..b56bbdae9 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_approvalrequests.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_approvalrequests.yaml @@ -19,6 +19,138 @@ spec: singular: approvalrequest scope: Namespaced versions: + - additionalPrinterColumns: + - jsonPath: .spec.parentStageRollout + name: Update-Run + type: string + - jsonPath: .spec.targetStage + name: Stage + type: string + - jsonPath: .status.conditions[?(@.type=="Approved")].status + name: Approved + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: |- + ApprovalRequest defines a request for user approval for staged update run. + The request object MUST have the following labels: + - `TargetUpdateRun`: Points to the staged update run that this approval request is for. + - `TargetStage`: The name of the stage that this approval request is for. + - `IsLatestUpdateRunApproval`: Indicates whether this approval request is the latest one related to this update run. 
+ - `TaskType`: Indicates whether this approval request is for the before or after stage task. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: The desired state of ApprovalRequest. + properties: + parentStageRollout: + description: The name of the staged update run that this approval + request is for. + type: string + targetStage: + description: The name of the update stage that this approval request + is for. + type: string + required: + - parentStageRollout + - targetStage + type: object + x-kubernetes-validations: + - message: The spec field is immutable + rule: self == oldSelf + status: + description: The observed state of ApprovalRequest. + properties: + conditions: + description: |- + Conditions is an array of current observed conditions for the specific type of post-update task. + Known conditions are "Approved" and "ApprovalAccepted". + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} - additionalPrinterColumns: - jsonPath: .spec.parentStageRollout name: Update-Run diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterapprovalrequests.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterapprovalrequests.yaml index 2333e23f1..02ddd96fe 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterapprovalrequests.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterapprovalrequests.yaml @@ -19,6 +19,138 @@ spec: singular: clusterapprovalrequest scope: Cluster versions: + - additionalPrinterColumns: + - jsonPath: .spec.parentStageRollout + name: Update-Run + type: string + - jsonPath: .spec.targetStage + name: Stage + type: string + - jsonPath: .status.conditions[?(@.type=="Approved")].status + name: Approved + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: |- + ClusterApprovalRequest defines a request for user approval for cluster staged update run. + The request object MUST have the following labels: + - `TargetUpdateRun`: Points to the cluster staged update run that this approval request is for. + - `TargetStage`: The name of the stage that this approval request is for. + - `IsLatestUpdateRunApproval`: Indicates whether this approval request is the latest one related to this update run. + - `TaskType`: Indicates whether this approval request is for the before or after stage task. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: The desired state of ClusterApprovalRequest. + properties: + parentStageRollout: + description: The name of the staged update run that this approval + request is for. + type: string + targetStage: + description: The name of the update stage that this approval request + is for. + type: string + required: + - parentStageRollout + - targetStage + type: object + x-kubernetes-validations: + - message: The spec field is immutable + rule: self == oldSelf + status: + description: The observed state of ClusterApprovalRequest. + properties: + conditions: + description: |- + Conditions is an array of current observed conditions for the specific type of post-update task. + Known conditions are "Approved" and "ApprovalAccepted". + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} - additionalPrinterColumns: - jsonPath: .spec.parentStageRollout name: Update-Run diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdateruns.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdateruns.yaml index 7f7ead012..d4955b6ce 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdateruns.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdateruns.yaml @@ -19,6 +19,1349 @@ spec: singular: clusterstagedupdaterun scope: Cluster versions: + - additionalPrinterColumns: + - jsonPath: .spec.placementName + name: Placement + type: string + - jsonPath: .spec.resourceSnapshotIndex + name: Resource-Snapshot-Index + type: string + - jsonPath: .status.policySnapshotIndexUsed + name: Policy-Snapshot-Index + type: string + - jsonPath: .status.conditions[?(@.type=="Initialized")].status + name: Initialized + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Succeeded")].status + name: Succeeded + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.stagedRolloutStrategyName + name: Strategy + priority: 1 + type: string + name: v1 + schema: + openAPIV3Schema: + description: |- + ClusterStagedUpdateRun represents a stage by stage update process that applies ClusterResourcePlacement + selected resources to specified clusters. 
+ Resources from unselected clusters are removed after all stages in the update strategy are completed. + Each ClusterStagedUpdateRun object corresponds to a single release of a specific resource version. + The release is abandoned if the ClusterStagedUpdateRun object is deleted or the scheduling decision changes. + The name of the ClusterStagedUpdateRun must conform to RFC 1123. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: The desired state of ClusterStagedUpdateRun. + properties: + placementName: + description: |- + PlacementName is the name of placement that this update run is applied to. + There can be multiple active update runs for each placement, but + it's up to the DevOps team to ensure they don't conflict with each other. + maxLength: 255 + type: string + x-kubernetes-validations: + - message: placementName is immutable + rule: self == oldSelf + resourceSnapshotIndex: + description: |- + The resource snapshot index of the selected resources to be updated across clusters. + The index represents a group of resource snapshots that includes all the resources a ResourcePlacement selected. 
+ type: string + x-kubernetes-validations: + - message: resourceSnapshotIndex is immutable + rule: self == oldSelf + stagedRolloutStrategyName: + description: |- + The name of the update strategy that specifies the stages and the sequence + in which the selected resources will be updated on the member clusters. The stages + are computed according to the referenced strategy when the update run starts + and recorded in the status field. + type: string + x-kubernetes-validations: + - message: stagedRolloutStrategyName is immutable + rule: self == oldSelf + state: + default: Initialize + description: |- + State indicates the desired state of the update run. + Initialize: The update run should be initialized but execution should not start (default). + Run: The update run should execute or resume execution. + Stop: The update run should stop execution. + enum: + - Initialize + - Run + - Stop + type: string + required: + - placementName + - stagedRolloutStrategyName + type: object + x-kubernetes-validations: + - message: 'invalid state transition: cannot transition from Initialize + to Stop' + rule: '!(has(oldSelf.state) && oldSelf.state == ''Initialize'' && self.state + == ''Stop'')' + - message: 'invalid state transition: cannot transition from Run to Initialize' + rule: '!(has(oldSelf.state) && oldSelf.state == ''Run'' && self.state + == ''Initialize'')' + - message: 'invalid state transition: cannot transition from Stop to Initialize' + rule: '!(has(oldSelf.state) && oldSelf.state == ''Stop'' && self.state + == ''Initialize'')' + status: + description: The observed status of ClusterStagedUpdateRun. + properties: + appliedStrategy: + description: |- + ApplyStrategy is the apply strategy that the stagedUpdateRun is using. + It is the same as the apply strategy in the CRP when the staged update run starts. + The apply strategy is not updated during the update run even if it changes in the CRP. 
+ properties: + allowCoOwnership: + description: |- + AllowCoOwnership controls whether co-ownership between Fleet and other agents are allowed + on a Fleet-managed resource. If set to false, Fleet will refuse to apply manifests to + a resource that has been owned by one or more non-Fleet agents. + + Note that Fleet does not support the case where one resource is being placed multiple + times by different CRPs on the same member cluster. An apply error will be returned if + Fleet finds that a resource has been owned by another placement attempt by Fleet, even + with the AllowCoOwnership setting set to true. + type: boolean + comparisonOption: + default: PartialComparison + description: |- + ComparisonOption controls how Fleet compares the desired state of a resource, as kept in + a hub cluster manifest, with the current state of the resource (if applicable) in the + member cluster. + + Available options are: + + * PartialComparison: with this option, Fleet will compare only fields that are managed by + Fleet, i.e., the fields that are specified explicitly in the hub cluster manifest. + Unmanaged fields are ignored. This is the default option. + + * FullComparison: with this option, Fleet will compare all fields of the resource, + even if the fields are absent from the hub cluster manifest. + + Consider using the PartialComparison option if you would like to: + + * use the default values for certain fields; or + * let another agent, e.g., HPAs, VPAs, etc., on the member cluster side manage some fields; or + * allow ad-hoc or cluster-specific settings on the member cluster side. + + To use the FullComparison option, it is recommended that you: + + * specify all fields as appropriate in the hub cluster, even if you are OK with using default + values; + * make sure that no fields are managed by agents other than Fleet on the member cluster + side, such as HPAs, VPAs, or other controllers. + + See the Fleet documentation for further explanations and usage examples. 
+ enum: + - PartialComparison + - FullComparison + type: string + serverSideApplyConfig: + description: ServerSideApplyConfig defines the configuration for + server side apply. It is honored only when type is ServerSideApply. + properties: + force: + description: |- + Force represents to force apply to succeed when resolving the conflicts + For any conflicting fields, + - If true, use the values from the resource to be applied to overwrite the values of the existing resource in the + target cluster, as well as take over ownership of such fields. + - If false, apply will fail with the reason ApplyConflictWithOtherApplier. + + For non-conflicting fields, values stay unchanged and ownership are shared between appliers. + type: boolean + type: object + type: + default: ClientSideApply + description: |- + Type is the apply strategy to use; it determines how Fleet applies manifests from the + hub cluster to a member cluster. + + Available options are: + + * ClientSideApply: Fleet uses three-way merge to apply manifests, similar to how kubectl + performs a client-side apply. This is the default option. + + Note that this strategy requires that Fleet keep the last applied configuration in the + annotation of an applied resource. If the object gets so large that apply ops can no longer + be executed, Fleet will switch to server-side apply. + + Use ComparisonOption and WhenToApply settings to control when an apply op can be executed. + + * ServerSideApply: Fleet uses server-side apply to apply manifests; Fleet itself will + become the field manager for specified fields in the manifests. Specify + ServerSideApplyConfig as appropriate if you would like Fleet to take over field + ownership upon conflicts. This is the recommended option for most scenarios; it might + help reduce object size and safely resolve conflicts between field values. 
For more + information, please refer to the Kubernetes documentation + (https://kubernetes.io/docs/reference/using-api/server-side-apply/#comparison-with-client-side-apply). + + Use ComparisonOption and WhenToApply settings to control when an apply op can be executed. + + * ReportDiff: Fleet will compare the desired state of a resource as kept in the hub cluster + with its current state (if applicable) on the member cluster side, and report any + differences. No actual apply ops would be executed, and resources will be left alone as they + are on the member clusters. + + If configuration differences are found on a resource, Fleet will consider this as an apply + error, which might block rollout depending on the specified rollout strategy. + + Use ComparisonOption setting to control how the difference is calculated. + + ClientSideApply and ServerSideApply apply strategies only work when Fleet can assume + ownership of a resource (e.g., the resource is created by Fleet, or Fleet has taken over + the resource). See the comments on the WhenToTakeOver field for more information. + ReportDiff apply strategy, however, will function regardless of Fleet's ownership + status. One may set up a CRP with the ReportDiff strategy and the Never takeover option, + and this will turn Fleet into a detection tool that reports only configuration differences + but do not touch any resources on the member cluster side. + + For a comparison between the different strategies and usage examples, refer to the + Fleet documentation. + enum: + - ClientSideApply + - ServerSideApply + - ReportDiff + type: string + whenToApply: + default: Always + description: |- + WhenToApply controls when Fleet would apply the manifests on the hub cluster to the member + clusters. 
+ + Available options are: + + * Always: with this option, Fleet will periodically apply hub cluster manifests + on the member cluster side; this will effectively overwrite any change in the fields + managed by Fleet (i.e., specified in the hub cluster manifest). This is the default + option. + + Note that this option would revert any ad-hoc changes made on the member cluster side in the + managed fields; if you would like to make temporary edits on the member cluster side + in the managed fields, switch to IfNotDrifted option. Note that changes in unmanaged + fields will be left alone; if you use the FullDiff compare option, such changes will + be reported as drifts. + + * IfNotDrifted: with this option, Fleet will stop applying hub cluster manifests on + clusters that have drifted from the desired state; apply ops would still continue on + the rest of the clusters. Drifts are calculated using the ComparisonOption, + as explained in the corresponding field. + + Use this option if you would like Fleet to detect drifts in your multi-cluster setup. + A drift occurs when an agent makes an ad-hoc change on the member cluster side that + makes affected resources deviate from its desired state as kept in the hub cluster; + and this option grants you an opportunity to view the drift details and take actions + accordingly. The drift details will be reported in the CRP status. + + To fix a drift, you may: + + * revert the changes manually on the member cluster side + * update the hub cluster manifest; this will trigger Fleet to apply the latest revision + of the manifests, which will overwrite the drifted fields + (if they are managed by Fleet) + * switch to the Always option; this will trigger Fleet to apply the current revision + of the manifests, which will overwrite the drifted fields (if they are managed by Fleet). 
+ * if applicable and necessary, delete the drifted resources on the member cluster side; Fleet + will attempt to re-create them using the hub cluster manifests + enum: + - Always + - IfNotDrifted + type: string + whenToTakeOver: + default: Always + description: |- + WhenToTakeOver determines the action to take when Fleet applies resources to a member + cluster for the first time and finds out that the resource already exists in the cluster. + + This setting is most relevant in cases where you would like Fleet to manage pre-existing + resources on a member cluster. + + Available options include: + + * Always: with this action, Fleet will apply the hub cluster manifests to the member + clusters even if the affected resources already exist. This is the default action. + + Note that this might lead to fields being overwritten on the member clusters, if they + are specified in the hub cluster manifests. + + * IfNoDiff: with this action, Fleet will apply the hub cluster manifests to the member + clusters if (and only if) pre-existing resources look the same as the hub cluster manifests. + + This is a safer option as pre-existing resources that are inconsistent with the hub cluster + manifests will not be overwritten; Fleet will ignore them until the inconsistencies + are resolved properly: any change you make to the hub cluster manifests would not be + applied, and if you delete the manifests or even the ClusterResourcePlacement itself + from the hub cluster, these pre-existing resources would not be taken away. + + Fleet will check for inconsistencies in accordance with the ComparisonOption setting. See also + the comments on the ComparisonOption field for more information. + + If a diff has been found in a field that is **managed** by Fleet (i.e., the field + **is specified ** in the hub cluster manifest), consider one of the following actions: + * set the field in the member cluster to be of the same value as that in the hub cluster + manifest. 
+ * update the hub cluster manifest so that its field value matches with that in the member + cluster. + * switch to the Always action, which will allow Fleet to overwrite the field with the + value in the hub cluster manifest. + + If a diff has been found in a field that is **not managed** by Fleet (i.e., the field + **is not specified** in the hub cluster manifest), consider one of the following actions: + * remove the field from the member cluster. + * update the hub cluster manifest so that the field is included in the hub cluster manifest. + + If appropriate, you may also delete the object from the member cluster; Fleet will recreate + it using the hub cluster manifest. + + * Never: with this action, Fleet will not apply a hub cluster manifest to the member + clusters if there is a corresponding pre-existing resource. However, if a manifest + has never been applied yet; or it has a corresponding resource which Fleet has assumed + ownership, apply op will still be executed. + + This is the safest option; one will have to remove the pre-existing resources (so that + Fleet can re-create them) or switch to a different + WhenToTakeOver option before Fleet starts processing the corresponding hub cluster + manifests. + + If you prefer Fleet stop processing all manifests, use this option along with the + ReportDiff apply strategy type. This setup would instruct Fleet to touch nothing + on the member cluster side but still report configuration differences between the + hub cluster and member clusters. Fleet will not give up ownership + that it has already assumed though. + enum: + - Always + - IfNoDiff + - Never + type: string + type: object + conditions: + description: |- + Conditions is an array of current observed conditions for StagedUpdateRun. + Known conditions are "Initialized", "Progressing", "Succeeded". + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + deletionStageStatus: + description: |- + DeletionStageStatus lists the current status of the deletion stage. 
The deletion stage + removes all the resources from the clusters that are not selected by the + current policy after all the update stages are completed. + properties: + afterStageTaskStatus: + description: |- + The status of the post-update tasks associated with the current stage. + Empty if the stage has not finished updating all the clusters. + items: + properties: + approvalRequestName: + description: |- + The name of the approval request object that is created for this stage. + Only valid if the AfterStageTaskType is Approval. + type: string + conditions: + description: |- + Conditions is an array of current observed conditions for the specific type of pre or post update task. + Known conditions are "ApprovalRequestCreated", "WaitTimeElapsed", and "ApprovalRequestApproved". + items: + description: Condition contains details for one aspect + of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in + foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: + description: The type of the pre or post update task. + enum: + - TimedWait + - Approval + type: string + required: + - type + type: object + maxItems: 2 + type: array + beforeStageTaskStatus: + description: The status of the pre-update tasks associated with + the current stage. + items: + properties: + approvalRequestName: + description: |- + The name of the approval request object that is created for this stage. + Only valid if the AfterStageTaskType is Approval. + type: string + conditions: + description: |- + Conditions is an array of current observed conditions for the specific type of pre or post update task. + Known conditions are "ApprovalRequestCreated", "WaitTimeElapsed", and "ApprovalRequestApproved". + items: + description: Condition contains details for one aspect + of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in + foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: + description: The type of the pre or post update task. + enum: + - TimedWait + - Approval + type: string + required: + - type + type: object + maxItems: 1 + type: array + clusters: + description: The list of each cluster's updating status in this + stage. 
+ items: + description: ClusterUpdatingStatus defines the status of the + update run on a cluster. + properties: + clusterName: + description: The name of the cluster. + type: string + clusterResourceOverrideSnapshots: + description: |- + ClusterResourceOverrides contains a list of applicable ClusterResourceOverride snapshot names + associated with the cluster. + The list is computed at the beginning of the update run and not updated during the update run. + The list is empty if there are no cluster overrides associated with the cluster. + items: + type: string + type: array + conditions: + description: |- + Conditions is an array of current observed conditions for clusters. Empty if the cluster has not started updating. + Known conditions are "Started", "Succeeded". + items: + description: Condition contains details for one aspect + of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in + foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + resourceOverrideSnapshots: + description: |- + ResourceOverrideSnapshots is a list of ResourceOverride snapshots associated with the cluster. + The list is computed at the beginning of the update run and not updated during the update run. + The list is empty if there are no resource overrides associated with the cluster. + items: + description: NamespacedName comprises a resource name, + with a mandatory namespace. + properties: + name: + description: Name is the name of the namespaced scope + resource. + type: string + namespace: + description: Namespace is namespace of the namespaced + scope resource. + type: string + required: + - name + - namespace + type: object + type: array + required: + - clusterName + type: object + type: array + conditions: + description: |- + Conditions is an array of current observed updating conditions for the stage. Empty if the stage has not started updating. + Known conditions are "Progressing", "Succeeded". + items: + description: Condition contains details for one aspect of the + current state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + endTime: + description: The time when the update finished on the stage. 
Empty + if the stage has not started updating. + format: date-time + type: string + stageName: + description: The name of the stage. + type: string + startTime: + description: The time when the update started on the stage. Empty + if the stage has not started updating. + format: date-time + type: string + required: + - clusters + - stageName + type: object + policyObservedClusterCount: + description: |- + PolicyObservedClusterCount records the number of observed clusters in the policy snapshot. + It is recorded at the beginning of the update run from the policy snapshot object. + If the `ObservedClusterCount` value is updated during the update run, the update run is abandoned. + type: integer + policySnapshotIndexUsed: + description: |- + PolicySnapShotIndexUsed records the policy snapshot index of the ClusterResourcePlacement (CRP) that + the update run is based on. The index represents the latest policy snapshot at the start of the update run. + If a newer policy snapshot is detected after the run starts, the staged update run is abandoned. + The scheduler must identify all clusters that meet the current policy before the update run begins. + All clusters involved in the update run are selected from the list of clusters scheduled by the CRP according + to the current policy. + type: string + resourceSnapshotIndexUsed: + description: |- + ResourceSnapshotIndexUsed records the resource snapshot index that the update run is based on. + The index represents the same resource snapshots as specified in the spec field, or the latest. + type: string + stagedUpdateStrategySnapshot: + description: |- + UpdateStrategySnapshot is the snapshot of the UpdateStrategy used for the update run. + The snapshot is immutable during the update run. + The strategy is applied to the list of clusters scheduled by the CRP according to the current policy. 
+                The update run fails to initialize if the strategy fails to produce a valid list of stages where each selected
+                cluster is included in exactly one stage.
+              properties:
+                stages:
+                  description: Stage specifies the configuration for each update
+                    stage.
+                  items:
+                    description: |-
+                      StageConfig describes a single update stage.
+                      The clusters in each stage are updated sequentially.
+                      The update stops if any of the updates fail.
+                    properties:
+                      afterStageTasks:
+                        description: |-
+                          The collection of tasks that each stage needs to complete successfully before moving to the next stage.
+                          Each task is executed in parallel and there cannot be more than one task of the same type.
+                        items:
+                          description: StageTask is the pre or post stage task that
+                            needs to be completed before starting or moving to the
+                            next stage.
+                          properties:
+                            type:
+                              description: The type of the before or after stage
+                                task.
+                              enum:
+                              - TimedWait
+                              - Approval
+                              type: string
+                            waitTime:
+                              description: The time to wait after all the clusters
+                                in the current stage complete the update before
+                                moving to the next stage.
+                              pattern: ^0|([0-9]+(\.[0-9]+)?(s|m|h))+$
+                              type: string
+                          required:
+                          - type
+                          type: object
+                        maxItems: 2
+                        type: array
+                        x-kubernetes-validations:
+                        - message: AfterStageTaskType is Approval, waitTime is not
+                            allowed
+                          rule: '!self.exists(e, e.type == ''Approval'' && has(e.waitTime))'
+                        - message: AfterStageTaskType is TimedWait, waitTime is
+                            required
+                          rule: '!self.exists(e, e.type == ''TimedWait'' && !has(e.waitTime))'
+                      beforeStageTasks:
+                        description: |-
+                          The collection of tasks that need to be completed successfully by each stage before starting the stage.
+                          Each task is executed in parallel and there cannot be more than one task of the same type.
+                        items:
+                          description: StageTask is the pre or post stage task that
+                            needs to be completed before starting or moving to the
+                            next stage.
+                          properties:
+                            type:
+                              description: The type of the before or after stage
+                                task. 
+ enum: + - TimedWait + - Approval + type: string + waitTime: + description: The time to wait after all the clusters + in the current stage complete the update before + moving to the next stage. + pattern: ^0|([0-9]+(\.[0-9]+)?(s|m|h))+$ + type: string + required: + - type + type: object + maxItems: 1 + type: array + x-kubernetes-validations: + - message: AfterStageTaskType is Approval, waitTime is not + allowed + rule: '!self.exists(e, e.type == ''Approval'' && has(e.waitTime))' + - message: BeforeStageTaskType cannot be TimedWait + rule: '!self.exists(e, e.type == ''TimedWait'')' + labelSelector: + description: |- + LabelSelector is a label query over all the joined member clusters. Clusters matching the query are selected + for this stage. There cannot be overlapping clusters between stages when the stagedUpdateRun is created. + If the label selector is empty, the stage includes all the selected clusters. + If the label selector is nil, the stage does not include any selected clusters. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + maxConcurrency: + anyOf: + - type: integer + - type: string + default: 1 + description: |- + MaxConcurrency specifies the maximum number of clusters that can be updated concurrently within this stage. + Value can be an absolute number (ex: 5) or a percentage of the total clusters in the stage (ex: 50%). + Fractional results are rounded down. A minimum of 1 update is enforced. + If not specified, all clusters in the stage are updated sequentially (effectively maxConcurrency = 1). + Defaults to 1. + pattern: ^(100|[1-9][0-9]?)%$ + x-kubernetes-int-or-string: true + x-kubernetes-validations: + - message: maxConcurrency must be at least 1 + rule: self == null || type(self) != int || self >= 1 + name: + description: The name of the stage. This MUST be unique + within the same StagedUpdateStrategy. + maxLength: 63 + pattern: ^[a-z0-9]+$ + type: string + sortingLabelKey: + description: |- + The label key used to sort the selected clusters. + The clusters within the stage are updated sequentially following the rule below: + - primary: Ascending order based on the value of the label key, interpreted as integers if present. + - secondary: Ascending order based on the name of the cluster if the label key is absent or the label value is the same. 
+ type: string + required: + - name + type: object + maxItems: 31 + type: array + required: + - stages + type: object + stagesStatus: + description: |- + StagesStatus lists the current updating status of each stage. + The list is empty if the update run is not started or failed to initialize. + items: + description: StageUpdatingStatus defines the status of the update + run in a stage. + properties: + afterStageTaskStatus: + description: |- + The status of the post-update tasks associated with the current stage. + Empty if the stage has not finished updating all the clusters. + items: + properties: + approvalRequestName: + description: |- + The name of the approval request object that is created for this stage. + Only valid if the AfterStageTaskType is Approval. + type: string + conditions: + description: |- + Conditions is an array of current observed conditions for the specific type of pre or post update task. + Known conditions are "ApprovalRequestCreated", "WaitTimeElapsed", and "ApprovalRequestApproved". + items: + description: Condition contains details for one aspect + of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in + foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: + description: The type of the pre or post update task. + enum: + - TimedWait + - Approval + type: string + required: + - type + type: object + maxItems: 2 + type: array + beforeStageTaskStatus: + description: The status of the pre-update tasks associated with + the current stage. + items: + properties: + approvalRequestName: + description: |- + The name of the approval request object that is created for this stage. + Only valid if the AfterStageTaskType is Approval. + type: string + conditions: + description: |- + Conditions is an array of current observed conditions for the specific type of pre or post update task. + Known conditions are "ApprovalRequestCreated", "WaitTimeElapsed", and "ApprovalRequestApproved". + items: + description: Condition contains details for one aspect + of the current state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in + foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: + description: The type of the pre or post update task. 
+ enum: + - TimedWait + - Approval + type: string + required: + - type + type: object + maxItems: 1 + type: array + clusters: + description: The list of each cluster's updating status in this + stage. + items: + description: ClusterUpdatingStatus defines the status of the + update run on a cluster. + properties: + clusterName: + description: The name of the cluster. + type: string + clusterResourceOverrideSnapshots: + description: |- + ClusterResourceOverrides contains a list of applicable ClusterResourceOverride snapshot names + associated with the cluster. + The list is computed at the beginning of the update run and not updated during the update run. + The list is empty if there are no cluster overrides associated with the cluster. + items: + type: string + type: array + conditions: + description: |- + Conditions is an array of current observed conditions for clusters. Empty if the cluster has not started updating. + Known conditions are "Started", "Succeeded". + items: + description: Condition contains details for one aspect + of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in + foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + resourceOverrideSnapshots: + description: |- + ResourceOverrideSnapshots is a list of ResourceOverride snapshots associated with the cluster. + The list is computed at the beginning of the update run and not updated during the update run. + The list is empty if there are no resource overrides associated with the cluster. + items: + description: NamespacedName comprises a resource name, + with a mandatory namespace. + properties: + name: + description: Name is the name of the namespaced + scope resource. + type: string + namespace: + description: Namespace is namespace of the namespaced + scope resource. + type: string + required: + - name + - namespace + type: object + type: array + required: + - clusterName + type: object + type: array + conditions: + description: |- + Conditions is an array of current observed updating conditions for the stage. Empty if the stage has not started updating. 
+ Known conditions are "Progressing", "Succeeded". + items: + description: Condition contains details for one aspect of + the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + endTime: + description: The time when the update finished on the stage. + Empty if the stage has not started updating. + format: date-time + type: string + stageName: + description: The name of the stage. + type: string + startTime: + description: The time when the update started on the stage. + Empty if the stage has not started updating. + format: date-time + type: string + required: + - clusters + - stageName + type: object + type: array + type: object + required: + - spec + type: object + x-kubernetes-validations: + - message: metadata.name max length is 63 + rule: size(self.metadata.name) < 64 + served: true + storage: false + subresources: + status: {} - additionalPrinterColumns: - jsonPath: .spec.placementName name: Placement diff --git a/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdatestrategies.yaml b/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdatestrategies.yaml index e4c5d099e..1556c8946 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdatestrategies.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_clusterstagedupdatestrategies.yaml @@ -19,6 +19,197 @@ spec: singular: clusterstagedupdatestrategy scope: Cluster versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + ClusterStagedUpdateStrategy defines a reusable strategy that specifies the stages and the sequence + in which the selected cluster resources will be updated on the member clusters. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: The desired state of ClusterStagedUpdateStrategy. + properties: + stages: + description: Stage specifies the configuration for each update stage. + items: + description: |- + StageConfig describes a single update stage. + The clusters in each stage are updated sequentially. + The update stops if any of the updates fail. + properties: + afterStageTasks: + description: |- + The collection of tasks that each stage needs to complete successfully before moving to the next stage. + Each task is executed in parallel and there cannot be more than one task of the same type. + items: + description: StageTask is the pre or post stage task that + needs to be completed before starting or moving to the next + stage. + properties: + type: + description: The type of the before or after stage task. + enum: + - TimedWait + - Approval + type: string + waitTime: + description: The time to wait after all the clusters in + the current stage complete the update before moving + to the next stage. 
+ pattern: ^0|([0-9]+(\.[0-9]+)?(s|m|h))+$ + type: string + required: + - type + type: object + maxItems: 2 + type: array + x-kubernetes-validations: + - message: AfterStageTaskType is Approval, waitTime is not allowed + rule: '!self.exists(e, e.type == ''Approval'' && has(e.waitTime))' + - message: AfterStageTaskType is TimedWait, waitTime is required + rule: '!self.exists(e, e.type == ''TimedWait'' && !has(e.waitTime))' + beforeStageTasks: + description: |- + The collection of tasks that needs to completed successfully by each stage before starting the stage. + Each task is executed in parallel and there cannot be more than one task of the same type. + items: + description: StageTask is the pre or post stage task that + needs to be completed before starting or moving to the next + stage. + properties: + type: + description: The type of the before or after stage task. + enum: + - TimedWait + - Approval + type: string + waitTime: + description: The time to wait after all the clusters in + the current stage complete the update before moving + to the next stage. + pattern: ^0|([0-9]+(\.[0-9]+)?(s|m|h))+$ + type: string + required: + - type + type: object + maxItems: 1 + type: array + x-kubernetes-validations: + - message: AfterStageTaskType is Approval, waitTime is not allowed + rule: '!self.exists(e, e.type == ''Approval'' && has(e.waitTime))' + - message: BeforeStageTaskType cannot be TimedWait + rule: '!self.exists(e, e.type == ''TimedWait'')' + labelSelector: + description: |- + LabelSelector is a label query over all the joined member clusters. Clusters matching the query are selected + for this stage. There cannot be overlapping clusters between stages when the stagedUpdateRun is created. + If the label selector is empty, the stage includes all the selected clusters. + If the label selector is nil, the stage does not include any selected clusters. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. 
The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + maxConcurrency: + anyOf: + - type: integer + - type: string + default: 1 + description: |- + MaxConcurrency specifies the maximum number of clusters that can be updated concurrently within this stage. + Value can be an absolute number (ex: 5) or a percentage of the total clusters in the stage (ex: 50%). + Fractional results are rounded down. A minimum of 1 update is enforced. + If not specified, all clusters in the stage are updated sequentially (effectively maxConcurrency = 1). + Defaults to 1. 
+ pattern: ^(100|[1-9][0-9]?)%$ + x-kubernetes-int-or-string: true + x-kubernetes-validations: + - message: maxConcurrency must be at least 1 + rule: self == null || type(self) != int || self >= 1 + name: + description: The name of the stage. This MUST be unique within + the same StagedUpdateStrategy. + maxLength: 63 + pattern: ^[a-z0-9]+$ + type: string + sortingLabelKey: + description: |- + The label key used to sort the selected clusters. + The clusters within the stage are updated sequentially following the rule below: + - primary: Ascending order based on the value of the label key, interpreted as integers if present. + - secondary: Ascending order based on the name of the cluster if the label key is absent or the label value is the same. + type: string + required: + - name + type: object + maxItems: 31 + type: array + required: + - stages + type: object + required: + - spec + type: object + served: true + storage: false + subresources: + status: {} - name: v1alpha1 schema: openAPIV3Schema: diff --git a/config/crd/bases/placement.kubernetes-fleet.io_stagedupdateruns.yaml b/config/crd/bases/placement.kubernetes-fleet.io_stagedupdateruns.yaml index bca8fe903..a0a5c1a4c 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_stagedupdateruns.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_stagedupdateruns.yaml @@ -19,6 +19,1349 @@ spec: singular: stagedupdaterun scope: Namespaced versions: + - additionalPrinterColumns: + - jsonPath: .spec.placementName + name: Placement + type: string + - jsonPath: .spec.resourceSnapshotIndex + name: Resource-Snapshot-Index + type: string + - jsonPath: .status.policySnapshotIndexUsed + name: Policy-Snapshot-Index + type: string + - jsonPath: .status.conditions[?(@.type=="Initialized")].status + name: Initialized + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Succeeded")].status + name: Succeeded + type: 
string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + - jsonPath: .spec.stagedRolloutStrategyName + name: Strategy + priority: 1 + type: string + name: v1 + schema: + openAPIV3Schema: + description: |- + StagedUpdateRun represents a stage by stage update process that applies ResourcePlacement + selected resources to specified clusters. + Resources from unselected clusters are removed after all stages in the update strategy are completed. + Each StagedUpdateRun object corresponds to a single release of a specific resource version. + The release is abandoned if the StagedUpdateRun object is deleted or the scheduling decision changes. + The name of the StagedUpdateRun must conform to RFC 1123. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: The desired state of StagedUpdateRun. + properties: + placementName: + description: |- + PlacementName is the name of placement that this update run is applied to. + There can be multiple active update runs for each placement, but + it's up to the DevOps team to ensure they don't conflict with each other. 
+ maxLength: 255 + type: string + x-kubernetes-validations: + - message: placementName is immutable + rule: self == oldSelf + resourceSnapshotIndex: + description: |- + The resource snapshot index of the selected resources to be updated across clusters. + The index represents a group of resource snapshots that includes all the resources a ResourcePlacement selected. + type: string + x-kubernetes-validations: + - message: resourceSnapshotIndex is immutable + rule: self == oldSelf + stagedRolloutStrategyName: + description: |- + The name of the update strategy that specifies the stages and the sequence + in which the selected resources will be updated on the member clusters. The stages + are computed according to the referenced strategy when the update run starts + and recorded in the status field. + type: string + x-kubernetes-validations: + - message: stagedRolloutStrategyName is immutable + rule: self == oldSelf + state: + default: Initialize + description: |- + State indicates the desired state of the update run. + Initialize: The update run should be initialized but execution should not start (default). + Run: The update run should execute or resume execution. + Stop: The update run should stop execution. + enum: + - Initialize + - Run + - Stop + type: string + required: + - placementName + - stagedRolloutStrategyName + type: object + x-kubernetes-validations: + - message: 'invalid state transition: cannot transition from Initialize + to Stop' + rule: '!(has(oldSelf.state) && oldSelf.state == ''Initialize'' && self.state + == ''Stop'')' + - message: 'invalid state transition: cannot transition from Run to Initialize' + rule: '!(has(oldSelf.state) && oldSelf.state == ''Run'' && self.state + == ''Initialize'')' + - message: 'invalid state transition: cannot transition from Stop to Initialize' + rule: '!(has(oldSelf.state) && oldSelf.state == ''Stop'' && self.state + == ''Initialize'')' + status: + description: The observed status of StagedUpdateRun. 
+ properties: + appliedStrategy: + description: |- + ApplyStrategy is the apply strategy that the stagedUpdateRun is using. + It is the same as the apply strategy in the CRP when the staged update run starts. + The apply strategy is not updated during the update run even if it changes in the CRP. + properties: + allowCoOwnership: + description: |- + AllowCoOwnership controls whether co-ownership between Fleet and other agents are allowed + on a Fleet-managed resource. If set to false, Fleet will refuse to apply manifests to + a resource that has been owned by one or more non-Fleet agents. + + Note that Fleet does not support the case where one resource is being placed multiple + times by different CRPs on the same member cluster. An apply error will be returned if + Fleet finds that a resource has been owned by another placement attempt by Fleet, even + with the AllowCoOwnership setting set to true. + type: boolean + comparisonOption: + default: PartialComparison + description: |- + ComparisonOption controls how Fleet compares the desired state of a resource, as kept in + a hub cluster manifest, with the current state of the resource (if applicable) in the + member cluster. + + Available options are: + + * PartialComparison: with this option, Fleet will compare only fields that are managed by + Fleet, i.e., the fields that are specified explicitly in the hub cluster manifest. + Unmanaged fields are ignored. This is the default option. + + * FullComparison: with this option, Fleet will compare all fields of the resource, + even if the fields are absent from the hub cluster manifest. + + Consider using the PartialComparison option if you would like to: + + * use the default values for certain fields; or + * let another agent, e.g., HPAs, VPAs, etc., on the member cluster side manage some fields; or + * allow ad-hoc or cluster-specific settings on the member cluster side. 
+ + To use the FullComparison option, it is recommended that you: + + * specify all fields as appropriate in the hub cluster, even if you are OK with using default + values; + * make sure that no fields are managed by agents other than Fleet on the member cluster + side, such as HPAs, VPAs, or other controllers. + + See the Fleet documentation for further explanations and usage examples. + enum: + - PartialComparison + - FullComparison + type: string + serverSideApplyConfig: + description: ServerSideApplyConfig defines the configuration for + server side apply. It is honored only when type is ServerSideApply. + properties: + force: + description: |- + Force represents to force apply to succeed when resolving the conflicts + For any conflicting fields, + - If true, use the values from the resource to be applied to overwrite the values of the existing resource in the + target cluster, as well as take over ownership of such fields. + - If false, apply will fail with the reason ApplyConflictWithOtherApplier. + + For non-conflicting fields, values stay unchanged and ownership are shared between appliers. + type: boolean + type: object + type: + default: ClientSideApply + description: |- + Type is the apply strategy to use; it determines how Fleet applies manifests from the + hub cluster to a member cluster. + + Available options are: + + * ClientSideApply: Fleet uses three-way merge to apply manifests, similar to how kubectl + performs a client-side apply. This is the default option. + + Note that this strategy requires that Fleet keep the last applied configuration in the + annotation of an applied resource. If the object gets so large that apply ops can no longer + be executed, Fleet will switch to server-side apply. + + Use ComparisonOption and WhenToApply settings to control when an apply op can be executed. + + * ServerSideApply: Fleet uses server-side apply to apply manifests; Fleet itself will + become the field manager for specified fields in the manifests. 
Specify + ServerSideApplyConfig as appropriate if you would like Fleet to take over field + ownership upon conflicts. This is the recommended option for most scenarios; it might + help reduce object size and safely resolve conflicts between field values. For more + information, please refer to the Kubernetes documentation + (https://kubernetes.io/docs/reference/using-api/server-side-apply/#comparison-with-client-side-apply). + + Use ComparisonOption and WhenToApply settings to control when an apply op can be executed. + + * ReportDiff: Fleet will compare the desired state of a resource as kept in the hub cluster + with its current state (if applicable) on the member cluster side, and report any + differences. No actual apply ops would be executed, and resources will be left alone as they + are on the member clusters. + + If configuration differences are found on a resource, Fleet will consider this as an apply + error, which might block rollout depending on the specified rollout strategy. + + Use ComparisonOption setting to control how the difference is calculated. + + ClientSideApply and ServerSideApply apply strategies only work when Fleet can assume + ownership of a resource (e.g., the resource is created by Fleet, or Fleet has taken over + the resource). See the comments on the WhenToTakeOver field for more information. + ReportDiff apply strategy, however, will function regardless of Fleet's ownership + status. One may set up a CRP with the ReportDiff strategy and the Never takeover option, + and this will turn Fleet into a detection tool that reports only configuration differences + but do not touch any resources on the member cluster side. + + For a comparison between the different strategies and usage examples, refer to the + Fleet documentation. 
+ enum: + - ClientSideApply + - ServerSideApply + - ReportDiff + type: string + whenToApply: + default: Always + description: |- + WhenToApply controls when Fleet would apply the manifests on the hub cluster to the member + clusters. + + Available options are: + + * Always: with this option, Fleet will periodically apply hub cluster manifests + on the member cluster side; this will effectively overwrite any change in the fields + managed by Fleet (i.e., specified in the hub cluster manifest). This is the default + option. + + Note that this option would revert any ad-hoc changes made on the member cluster side in the + managed fields; if you would like to make temporary edits on the member cluster side + in the managed fields, switch to IfNotDrifted option. Note that changes in unmanaged + fields will be left alone; if you use the FullDiff compare option, such changes will + be reported as drifts. + + * IfNotDrifted: with this option, Fleet will stop applying hub cluster manifests on + clusters that have drifted from the desired state; apply ops would still continue on + the rest of the clusters. Drifts are calculated using the ComparisonOption, + as explained in the corresponding field. + + Use this option if you would like Fleet to detect drifts in your multi-cluster setup. + A drift occurs when an agent makes an ad-hoc change on the member cluster side that + makes affected resources deviate from its desired state as kept in the hub cluster; + and this option grants you an opportunity to view the drift details and take actions + accordingly. The drift details will be reported in the CRP status. 
+ + To fix a drift, you may: + + * revert the changes manually on the member cluster side + * update the hub cluster manifest; this will trigger Fleet to apply the latest revision + of the manifests, which will overwrite the drifted fields + (if they are managed by Fleet) + * switch to the Always option; this will trigger Fleet to apply the current revision + of the manifests, which will overwrite the drifted fields (if they are managed by Fleet). + * if applicable and necessary, delete the drifted resources on the member cluster side; Fleet + will attempt to re-create them using the hub cluster manifests + enum: + - Always + - IfNotDrifted + type: string + whenToTakeOver: + default: Always + description: |- + WhenToTakeOver determines the action to take when Fleet applies resources to a member + cluster for the first time and finds out that the resource already exists in the cluster. + + This setting is most relevant in cases where you would like Fleet to manage pre-existing + resources on a member cluster. + + Available options include: + + * Always: with this action, Fleet will apply the hub cluster manifests to the member + clusters even if the affected resources already exist. This is the default action. + + Note that this might lead to fields being overwritten on the member clusters, if they + are specified in the hub cluster manifests. + + * IfNoDiff: with this action, Fleet will apply the hub cluster manifests to the member + clusters if (and only if) pre-existing resources look the same as the hub cluster manifests. + + This is a safer option as pre-existing resources that are inconsistent with the hub cluster + manifests will not be overwritten; Fleet will ignore them until the inconsistencies + are resolved properly: any change you make to the hub cluster manifests would not be + applied, and if you delete the manifests or even the ClusterResourcePlacement itself + from the hub cluster, these pre-existing resources would not be taken away. 
+ + Fleet will check for inconsistencies in accordance with the ComparisonOption setting. See also + the comments on the ComparisonOption field for more information. + + If a diff has been found in a field that is **managed** by Fleet (i.e., the field + **is specified ** in the hub cluster manifest), consider one of the following actions: + * set the field in the member cluster to be of the same value as that in the hub cluster + manifest. + * update the hub cluster manifest so that its field value matches with that in the member + cluster. + * switch to the Always action, which will allow Fleet to overwrite the field with the + value in the hub cluster manifest. + + If a diff has been found in a field that is **not managed** by Fleet (i.e., the field + **is not specified** in the hub cluster manifest), consider one of the following actions: + * remove the field from the member cluster. + * update the hub cluster manifest so that the field is included in the hub cluster manifest. + + If appropriate, you may also delete the object from the member cluster; Fleet will recreate + it using the hub cluster manifest. + + * Never: with this action, Fleet will not apply a hub cluster manifest to the member + clusters if there is a corresponding pre-existing resource. However, if a manifest + has never been applied yet; or it has a corresponding resource which Fleet has assumed + ownership, apply op will still be executed. + + This is the safest option; one will have to remove the pre-existing resources (so that + Fleet can re-create them) or switch to a different + WhenToTakeOver option before Fleet starts processing the corresponding hub cluster + manifests. + + If you prefer Fleet stop processing all manifests, use this option along with the + ReportDiff apply strategy type. This setup would instruct Fleet to touch nothing + on the member cluster side but still report configuration differences between the + hub cluster and member clusters. 
Fleet will not give up ownership + that it has already assumed though. + enum: + - Always + - IfNoDiff + - Never + type: string + type: object + conditions: + description: |- + Conditions is an array of current observed conditions for StagedUpdateRun. + Known conditions are "Initialized", "Progressing", "Succeeded". + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + deletionStageStatus: + description: |- + DeletionStageStatus lists the current status of the deletion stage. The deletion stage + removes all the resources from the clusters that are not selected by the + current policy after all the update stages are completed. + properties: + afterStageTaskStatus: + description: |- + The status of the post-update tasks associated with the current stage. + Empty if the stage has not finished updating all the clusters. + items: + properties: + approvalRequestName: + description: |- + The name of the approval request object that is created for this stage. + Only valid if the AfterStageTaskType is Approval. + type: string + conditions: + description: |- + Conditions is an array of current observed conditions for the specific type of pre or post update task. + Known conditions are "ApprovalRequestCreated", "WaitTimeElapsed", and "ApprovalRequestApproved". + items: + description: Condition contains details for one aspect + of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in + foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: + description: The type of the pre or post update task. + enum: + - TimedWait + - Approval + type: string + required: + - type + type: object + maxItems: 2 + type: array + beforeStageTaskStatus: + description: The status of the pre-update tasks associated with + the current stage. + items: + properties: + approvalRequestName: + description: |- + The name of the approval request object that is created for this stage. + Only valid if the AfterStageTaskType is Approval. + type: string + conditions: + description: |- + Conditions is an array of current observed conditions for the specific type of pre or post update task. + Known conditions are "ApprovalRequestCreated", "WaitTimeElapsed", and "ApprovalRequestApproved". 
+ items: + description: Condition contains details for one aspect + of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in + foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: + description: The type of the pre or post update task. + enum: + - TimedWait + - Approval + type: string + required: + - type + type: object + maxItems: 1 + type: array + clusters: + description: The list of each cluster's updating status in this + stage. + items: + description: ClusterUpdatingStatus defines the status of the + update run on a cluster. + properties: + clusterName: + description: The name of the cluster. + type: string + clusterResourceOverrideSnapshots: + description: |- + ClusterResourceOverrides contains a list of applicable ClusterResourceOverride snapshot names + associated with the cluster. + The list is computed at the beginning of the update run and not updated during the update run. + The list is empty if there are no cluster overrides associated with the cluster. + items: + type: string + type: array + conditions: + description: |- + Conditions is an array of current observed conditions for clusters. Empty if the cluster has not started updating. + Known conditions are "Started", "Succeeded". + items: + description: Condition contains details for one aspect + of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in + foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + resourceOverrideSnapshots: + description: |- + ResourceOverrideSnapshots is a list of ResourceOverride snapshots associated with the cluster. + The list is computed at the beginning of the update run and not updated during the update run. + The list is empty if there are no resource overrides associated with the cluster. + items: + description: NamespacedName comprises a resource name, + with a mandatory namespace. + properties: + name: + description: Name is the name of the namespaced scope + resource. 
+ type: string + namespace: + description: Namespace is namespace of the namespaced + scope resource. + type: string + required: + - name + - namespace + type: object + type: array + required: + - clusterName + type: object + type: array + conditions: + description: |- + Conditions is an array of current observed updating conditions for the stage. Empty if the stage has not started updating. + Known conditions are "Progressing", "Succeeded". + items: + description: Condition contains details for one aspect of the + current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + endTime: + description: The time when the update finished on the stage. Empty + if the stage has not started updating. + format: date-time + type: string + stageName: + description: The name of the stage. + type: string + startTime: + description: The time when the update started on the stage. Empty + if the stage has not started updating. + format: date-time + type: string + required: + - clusters + - stageName + type: object + policyObservedClusterCount: + description: |- + PolicyObservedClusterCount records the number of observed clusters in the policy snapshot. + It is recorded at the beginning of the update run from the policy snapshot object. + If the `ObservedClusterCount` value is updated during the update run, the update run is abandoned. + type: integer + policySnapshotIndexUsed: + description: |- + PolicySnapShotIndexUsed records the policy snapshot index of the ClusterResourcePlacement (CRP) that + the update run is based on. The index represents the latest policy snapshot at the start of the update run. + If a newer policy snapshot is detected after the run starts, the staged update run is abandoned. + The scheduler must identify all clusters that meet the current policy before the update run begins. + All clusters involved in the update run are selected from the list of clusters scheduled by the CRP according + to the current policy. 
+ type: string + resourceSnapshotIndexUsed: + description: |- + ResourceSnapshotIndexUsed records the resource snapshot index that the update run is based on. + The index represents the same resource snapshots as specified in the spec field, or the latest. + type: string + stagedUpdateStrategySnapshot: + description: |- + UpdateStrategySnapshot is the snapshot of the UpdateStrategy used for the update run. + The snapshot is immutable during the update run. + The strategy is applied to the list of clusters scheduled by the CRP according to the current policy. + The update run fails to initialize if the strategy fails to produce a valid list of stages where each selected + cluster is included in exactly one stage. + properties: + stages: + description: Stage specifies the configuration for each update + stage. + items: + description: |- + StageConfig describes a single update stage. + The clusters in each stage are updated sequentially. + The update stops if any of the updates fail. + properties: + afterStageTasks: + description: |- + The collection of tasks that each stage needs to complete successfully before moving to the next stage. + Each task is executed in parallel and there cannot be more than one task of the same type. + items: + description: StageTask is the pre or post stage task that + needs to be completed before starting or moving to the + next stage. + properties: + type: + description: The type of the before or after stage + task. + enum: + - TimedWait + - Approval + type: string + waitTime: + description: The time to wait after all the clusters + in the current stage complete the update before + moving to the next stage. 
+ pattern: ^0|([0-9]+(\.[0-9]+)?(s|m|h))+$ + type: string + required: + - type + type: object + maxItems: 2 + type: array + x-kubernetes-validations: + - message: AfterStageTaskType is Approval, waitTime is not + allowed + rule: '!self.exists(e, e.type == ''Approval'' && has(e.waitTime))' + - message: AfterStageTaskType is TimedWait, waitTime is + required + rule: '!self.exists(e, e.type == ''TimedWait'' && !has(e.waitTime))' + beforeStageTasks: + description: |- + The collection of tasks that needs to completed successfully by each stage before starting the stage. + Each task is executed in parallel and there cannot be more than one task of the same type. + items: + description: StageTask is the pre or post stage task that + needs to be completed before starting or moving to the + next stage. + properties: + type: + description: The type of the before or after stage + task. + enum: + - TimedWait + - Approval + type: string + waitTime: + description: The time to wait after all the clusters + in the current stage complete the update before + moving to the next stage. + pattern: ^0|([0-9]+(\.[0-9]+)?(s|m|h))+$ + type: string + required: + - type + type: object + maxItems: 1 + type: array + x-kubernetes-validations: + - message: AfterStageTaskType is Approval, waitTime is not + allowed + rule: '!self.exists(e, e.type == ''Approval'' && has(e.waitTime))' + - message: BeforeStageTaskType cannot be TimedWait + rule: '!self.exists(e, e.type == ''TimedWait'')' + labelSelector: + description: |- + LabelSelector is a label query over all the joined member clusters. Clusters matching the query are selected + for this stage. There cannot be overlapping clusters between stages when the stagedUpdateRun is created. + If the label selector is empty, the stage includes all the selected clusters. + If the label selector is nil, the stage does not include any selected clusters. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + maxConcurrency: + anyOf: + - type: integer + - type: string + default: 1 + description: |- + MaxConcurrency specifies the maximum number of clusters that can be updated concurrently within this stage. + Value can be an absolute number (ex: 5) or a percentage of the total clusters in the stage (ex: 50%). + Fractional results are rounded down. A minimum of 1 update is enforced. + If not specified, all clusters in the stage are updated sequentially (effectively maxConcurrency = 1). + Defaults to 1. 
+ pattern: ^(100|[1-9][0-9]?)%$ + x-kubernetes-int-or-string: true + x-kubernetes-validations: + - message: maxConcurrency must be at least 1 + rule: self == null || type(self) != int || self >= 1 + name: + description: The name of the stage. This MUST be unique + within the same StagedUpdateStrategy. + maxLength: 63 + pattern: ^[a-z0-9]+$ + type: string + sortingLabelKey: + description: |- + The label key used to sort the selected clusters. + The clusters within the stage are updated sequentially following the rule below: + - primary: Ascending order based on the value of the label key, interpreted as integers if present. + - secondary: Ascending order based on the name of the cluster if the label key is absent or the label value is the same. + type: string + required: + - name + type: object + maxItems: 31 + type: array + required: + - stages + type: object + stagesStatus: + description: |- + StagesStatus lists the current updating status of each stage. + The list is empty if the update run is not started or failed to initialize. + items: + description: StageUpdatingStatus defines the status of the update + run in a stage. + properties: + afterStageTaskStatus: + description: |- + The status of the post-update tasks associated with the current stage. + Empty if the stage has not finished updating all the clusters. + items: + properties: + approvalRequestName: + description: |- + The name of the approval request object that is created for this stage. + Only valid if the AfterStageTaskType is Approval. + type: string + conditions: + description: |- + Conditions is an array of current observed conditions for the specific type of pre or post update task. + Known conditions are "ApprovalRequestCreated", "WaitTimeElapsed", and "ApprovalRequestApproved". + items: + description: Condition contains details for one aspect + of the current state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in + foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: + description: The type of the pre or post update task. 
+ enum: + - TimedWait + - Approval + type: string + required: + - type + type: object + maxItems: 2 + type: array + beforeStageTaskStatus: + description: The status of the pre-update tasks associated with + the current stage. + items: + properties: + approvalRequestName: + description: |- + The name of the approval request object that is created for this stage. + Only valid if the AfterStageTaskType is Approval. + type: string + conditions: + description: |- + Conditions is an array of current observed conditions for the specific type of pre or post update task. + Known conditions are "ApprovalRequestCreated", "WaitTimeElapsed", and "ApprovalRequestApproved". + items: + description: Condition contains details for one aspect + of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. 
+ This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in + foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: + description: The type of the pre or post update task. + enum: + - TimedWait + - Approval + type: string + required: + - type + type: object + maxItems: 1 + type: array + clusters: + description: The list of each cluster's updating status in this + stage. + items: + description: ClusterUpdatingStatus defines the status of the + update run on a cluster. + properties: + clusterName: + description: The name of the cluster. + type: string + clusterResourceOverrideSnapshots: + description: |- + ClusterResourceOverrides contains a list of applicable ClusterResourceOverride snapshot names + associated with the cluster. + The list is computed at the beginning of the update run and not updated during the update run. + The list is empty if there are no cluster overrides associated with the cluster. + items: + type: string + type: array + conditions: + description: |- + Conditions is an array of current observed conditions for clusters. Empty if the cluster has not started updating. + Known conditions are "Started", "Succeeded". + items: + description: Condition contains details for one aspect + of the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. 
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, + False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in + foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + resourceOverrideSnapshots: + description: |- + ResourceOverrideSnapshots is a list of ResourceOverride snapshots associated with the cluster. + The list is computed at the beginning of the update run and not updated during the update run. 
+ The list is empty if there are no resource overrides associated with the cluster. + items: + description: NamespacedName comprises a resource name, + with a mandatory namespace. + properties: + name: + description: Name is the name of the namespaced + scope resource. + type: string + namespace: + description: Namespace is namespace of the namespaced + scope resource. + type: string + required: + - name + - namespace + type: object + type: array + required: + - clusterName + type: object + type: array + conditions: + description: |- + Conditions is an array of current observed updating conditions for the stage. Empty if the stage has not started updating. + Known conditions are "Progressing", "Succeeded". + items: + description: Condition contains details for one aspect of + the current state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. 
+ The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + endTime: + description: The time when the update finished on the stage. + Empty if the stage has not started updating. + format: date-time + type: string + stageName: + description: The name of the stage. + type: string + startTime: + description: The time when the update started on the stage. + Empty if the stage has not started updating. 
+ format: date-time + type: string + required: + - clusters + - stageName + type: object + type: array + type: object + required: + - spec + type: object + x-kubernetes-validations: + - message: metadata.name max length is 63 + rule: size(self.metadata.name) < 64 + served: true + storage: false + subresources: + status: {} - additionalPrinterColumns: - jsonPath: .spec.placementName name: Placement diff --git a/config/crd/bases/placement.kubernetes-fleet.io_stagedupdatestrategies.yaml b/config/crd/bases/placement.kubernetes-fleet.io_stagedupdatestrategies.yaml index 6e1119657..487040869 100644 --- a/config/crd/bases/placement.kubernetes-fleet.io_stagedupdatestrategies.yaml +++ b/config/crd/bases/placement.kubernetes-fleet.io_stagedupdatestrategies.yaml @@ -19,6 +19,195 @@ spec: singular: stagedupdatestrategy scope: Namespaced versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + StagedUpdateStrategy defines a reusable strategy that specifies the stages and the sequence + in which the selected cluster resources will be updated on the member clusters. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: The desired state of StagedUpdateStrategy. + properties: + stages: + description: Stage specifies the configuration for each update stage. 
+ items: + description: |- + StageConfig describes a single update stage. + The clusters in each stage are updated sequentially. + The update stops if any of the updates fail. + properties: + afterStageTasks: + description: |- + The collection of tasks that each stage needs to complete successfully before moving to the next stage. + Each task is executed in parallel and there cannot be more than one task of the same type. + items: + description: StageTask is the pre or post stage task that + needs to be completed before starting or moving to the next + stage. + properties: + type: + description: The type of the before or after stage task. + enum: + - TimedWait + - Approval + type: string + waitTime: + description: The time to wait after all the clusters in + the current stage complete the update before moving + to the next stage. + pattern: ^0|([0-9]+(\.[0-9]+)?(s|m|h))+$ + type: string + required: + - type + type: object + maxItems: 2 + type: array + x-kubernetes-validations: + - message: AfterStageTaskType is Approval, waitTime is not allowed + rule: '!self.exists(e, e.type == ''Approval'' && has(e.waitTime))' + - message: AfterStageTaskType is TimedWait, waitTime is required + rule: '!self.exists(e, e.type == ''TimedWait'' && !has(e.waitTime))' + beforeStageTasks: + description: |- + The collection of tasks that needs to completed successfully by each stage before starting the stage. + Each task is executed in parallel and there cannot be more than one task of the same type. + items: + description: StageTask is the pre or post stage task that + needs to be completed before starting or moving to the next + stage. + properties: + type: + description: The type of the before or after stage task. + enum: + - TimedWait + - Approval + type: string + waitTime: + description: The time to wait after all the clusters in + the current stage complete the update before moving + to the next stage. 
+ pattern: ^0|([0-9]+(\.[0-9]+)?(s|m|h))+$ + type: string + required: + - type + type: object + maxItems: 1 + type: array + x-kubernetes-validations: + - message: AfterStageTaskType is Approval, waitTime is not allowed + rule: '!self.exists(e, e.type == ''Approval'' && has(e.waitTime))' + - message: BeforeStageTaskType cannot be TimedWait + rule: '!self.exists(e, e.type == ''TimedWait'')' + labelSelector: + description: |- + LabelSelector is a label query over all the joined member clusters. Clusters matching the query are selected + for this stage. There cannot be overlapping clusters between stages when the stagedUpdateRun is created. + If the label selector is empty, the stage includes all the selected clusters. + If the label selector is nil, the stage does not include any selected clusters. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + maxConcurrency: + anyOf: + - type: integer + - type: string + default: 1 + description: |- + MaxConcurrency specifies the maximum number of clusters that can be updated concurrently within this stage. + Value can be an absolute number (ex: 5) or a percentage of the total clusters in the stage (ex: 50%). + Fractional results are rounded down. A minimum of 1 update is enforced. + If not specified, all clusters in the stage are updated sequentially (effectively maxConcurrency = 1). + Defaults to 1. + pattern: ^(100|[1-9][0-9]?)%$ + x-kubernetes-int-or-string: true + x-kubernetes-validations: + - message: maxConcurrency must be at least 1 + rule: self == null || type(self) != int || self >= 1 + name: + description: The name of the stage. This MUST be unique within + the same StagedUpdateStrategy. + maxLength: 63 + pattern: ^[a-z0-9]+$ + type: string + sortingLabelKey: + description: |- + The label key used to sort the selected clusters. + The clusters within the stage are updated sequentially following the rule below: + - primary: Ascending order based on the value of the label key, interpreted as integers if present. + - secondary: Ascending order based on the name of the cluster if the label key is absent or the label value is the same. 
+ type: string + required: + - name + type: object + maxItems: 31 + type: array + required: + - stages + type: object + required: + - spec + type: object + served: true + storage: false - name: v1beta1 schema: openAPIV3Schema: From ad61196028212d1718d728506fa320708108223f Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Wed, 11 Feb 2026 11:46:43 +1100 Subject: [PATCH 02/17] test: tweak CI workflow to enable Ginkgo-based parallelization for applicable cases + enable priority queue in all tests (#420) --- .github/workflows/ci.yml | 44 +++++++++++++---- Makefile | 17 +++++-- charts/member-agent/templates/deployment.yaml | 5 ++ charts/member-agent/values.yaml | 5 ++ .../workapplier/controller_test.go | 12 +++++ .../workapplier/drift_detection_takeover.go | 2 +- pkg/controllers/workapplier/suite_test.go | 48 ++++++++++++------- .../workapplier/waves_integration_test.go | 4 +- test/e2e/setup.sh | 2 + 9 files changed, 107 insertions(+), 32 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4738975f3..562aa6375 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,8 +30,9 @@ jobs: do_not_skip: '["workflow_dispatch", "schedule", "push"]' concurrent_skipping: false - unit-tests: - runs-on: ubuntu-latest + unit-and-integration-tests: + runs-on: + labels: oracle-vm-16cpu-64gb-x86-64 needs: detect-noop if: needs.detect-noop.outputs.noop != 'true' steps: @@ -46,18 +47,43 @@ jobs: - name: Set up Ginkgo CLI run: | go install github.com/onsi/ginkgo/v2/ginkgo@v2.19.1 + + - name: Prepare necessary environment variables + run: | + echo "CGO_ENABLED=1" >> $GITHUB_ENV + KUBEBUILDER_ASSETS=$(make --silent kubebuilder-assets-path) + echo "KUBEBUILDER_ASSETS="$KUBEBUILDER_ASSETS"" >> $GITHUB_ENV - - name: Run unit tests & Generate coverage - run: make test - + # Certain tests that require special setup (e.g., those that should be run with Ginkgo CLI only) will + # be skipped in this step. 
+      #
+      # Note that the skipping only applies to the CI environment.
+      - name: Run unit and integration tests with default setup & generate coverage
+        run: |
+          make test
+        env:
+          KUBEFLEET_CI_TEST_RUNNER_NAME: 'default'
+
+      # The work applier integration tests use in-memory Kubernetes environment setup; due to resource constraints
+      # and the way the tests are organized, running the suite with as many parallel Ginkgo processes as possible (i.e.,
+      # the number of all CPU cores) might not lead to the optimal outcome.
+      #
+      # Note (chenyu1): switch to test matrices if we need to test with more configuration combos in the future.
+      - name: Run work applier unit and integration tests with Ginkgo CLI & generate coverage
+        run: |
+          ginkgo -v -p --procs=4 --race --cover -coverprofile=work-applier-it-coverage.out ./pkg/controllers/workapplier/
+          KUBEFLEET_CI_WORK_APPLIER_RUN_WITH_PRIORITY_QUEUE=true ginkgo -v -p --procs=4 --race --cover -coverprofile=work-applier-it-pri-q-coverage.out ./pkg/controllers/workapplier/
+        env:
+          KUBEFLEET_CI_TEST_RUNNER_NAME: 'ginkgo'
+
       - name: Upload Codecov report
         uses: codecov/codecov-action@v5
         with:
-          ## Repository upload token - get it from codecov.io. Required only for private repositories
+          ## Repository upload token - get it from codecov.io. Required only for private repositories
           token: ${{ secrets.CODECOV_TOKEN }}
-          ## Comma-separated list of files to upload
-          files: ./ut-coverage.xml
-
+          # The codecov action will auto-search all coverage files by default. All uploaded coverage will be
+          # merged automatically.
+ e2e-tests: strategy: fail-fast: false diff --git a/Makefile b/Makefile index 4b7311151..4b49b106c 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,6 @@ ifeq ($(TARGET_ARCH),$(filter $(TARGET_ARCH),x86_64)) else ifeq ($(TARGET_ARCH),$(filter $(TARGET_ARCH),aarch64 arm)) TARGET_ARCH := arm64 endif -$(info Auto-detected system architecture: $(TARGET_ARCH)) endif endif @@ -136,23 +135,30 @@ vet: ## Run go vet against code ## -------------------------------------- .PHONY: test -test: manifests generate fmt vet local-unit-test integration-test## Run unit tests and integration tests +test: manifests generate fmt vet local-unit-test integration-test ## Run unit tests and integration tests ## # Set up the timeout parameters as some of the tests (rollout controller) lengths have exceeded the default 10 minute mark. -# TO-DO (chenyu1): enable parallelization for single package integration tests. +# Note: this recipe runs both unit tests and integration tests under the pkg/ directory. .PHONY: local-unit-test local-unit-test: $(ENVTEST) ## Run unit tests export CGO_ENABLED=1 && \ export KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" && \ go test `go list ./pkg/... ./cmd/...` -race -coverpkg=./... -coverprofile=ut-coverage.xml -covermode=atomic -v -timeout=30m +# Note: this recipe runs the integration tests under the /test/scheduler and /test/apis/ directories with the Ginkgo CLI. .PHONY: integration-test integration-test: $(ENVTEST) ## Run integration tests export CGO_ENABLED=1 && \ export KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" && \ - ginkgo -v -p --race --cover --coverpkg=./pkg/scheduler/... ./test/scheduler && \ - ginkgo -v -p --race --cover --coverpkg=./... ./test/apis/... + ginkgo -v -p --race --cover --coverpkg=./pkg/scheduler/... -coverprofile=scheduler-it.out ./test/scheduler && \ + ginkgo -v -p --race --cover --coverpkg=./apis/ -coverprofile=api-validation-it.out ./test/apis/... 
+ +.PHONY: kubebuilder-assets-path +kubebuilder-assets-path: $(ENVTEST) ## Get the path to kubebuilder assets + @export CGO_ENABLED=1 && \ + export KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" && \ + echo $$KUBEBUILDER_ASSETS ## local tests & e2e tests @@ -237,6 +243,7 @@ push: ## Build and push all Docker images # On some systems the emulation setup might not work at all (e.g., macOS on Apple Silicon -> Rosetta 2 will be used # by Docker Desktop as the default emulation option for AMD64 on ARM64 container compatibility). docker-buildx-builder: + $(info Auto-detected system architecture: $(TARGET_ARCH)) @if ! docker buildx ls | grep $(BUILDX_BUILDER_NAME); then \ if [ "$(TARGET_ARCH)" = "amd64" ] ; then \ echo "The target is an x86_64 platform; setting up emulation for other known architectures"; \ diff --git a/charts/member-agent/templates/deployment.yaml b/charts/member-agent/templates/deployment.yaml index 8644b57e6..c003378c5 100644 --- a/charts/member-agent/templates/deployment.yaml +++ b/charts/member-agent/templates/deployment.yaml @@ -69,6 +69,11 @@ spec: {{- if .Values.region }} - --region={{ .Values.region }} {{- end }} + {{- if .Values.priorityQueue.enabled }} + - --enable-work-applier-priority-queue=true + - --work-applier-priority-linear-equation-coeff-a={{ .Values.priorityQueue.priorityLinearEquationCoeffA }} + - --work-applier-priority-linear-equation-coeff-b={{ .Values.priorityQueue.priorityLinearEquationCoeffB }} + {{- end }} env: - name: HUB_SERVER_URL value: "{{ .Values.config.hubURL }}" diff --git a/charts/member-agent/values.yaml b/charts/member-agent/values.yaml index 599dcdfc0..10c3578dd 100644 --- a/charts/member-agent/values.yaml +++ b/charts/member-agent/values.yaml @@ -64,3 +64,8 @@ enableV1Beta1APIs: true enablePprof: true pprofPort: 6065 hubPprofPort: 6066 + +priorityQueue: + enabled: false + priorityLinearEquationCoeffA: -3 + priorityLinearEquationCoeffB: 100 diff --git 
a/pkg/controllers/workapplier/controller_test.go b/pkg/controllers/workapplier/controller_test.go index 47e4dc092..698e501aa 100644 --- a/pkg/controllers/workapplier/controller_test.go +++ b/pkg/controllers/workapplier/controller_test.go @@ -41,6 +41,11 @@ import ( fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" ) +const ( + testRunnerNameEnvVarName = "KUBEFLEET_CI_TEST_RUNNER_NAME" + runnerNameToSkipTestsInCI = "default" +) + const ( workName = "work-1" @@ -255,6 +260,13 @@ func manifestAppliedCond(workGeneration int64, status metav1.ConditionStatus, re } func TestMain(m *testing.M) { + // Skip the tests if in the CI environment the tests are invoked with `go test` instead of the Ginkgo CLI. + // This has no effect outside the CI environment. + if v := os.Getenv(testRunnerNameEnvVarName); v == runnerNameToSkipTestsInCI { + log.Println("Skipping the tests in CI as they are not run with the expected runner") + os.Exit(0) + } + // Add custom APIs to the runtime scheme. if err := fleetv1beta1.AddToScheme(scheme.Scheme); err != nil { log.Fatalf("failed to add custom APIs (placement/v1beta1) to the runtime scheme: %v", err) diff --git a/pkg/controllers/workapplier/drift_detection_takeover.go b/pkg/controllers/workapplier/drift_detection_takeover.go index 457fe2d9e..e1461f4b6 100644 --- a/pkg/controllers/workapplier/drift_detection_takeover.go +++ b/pkg/controllers/workapplier/drift_detection_takeover.go @@ -70,7 +70,7 @@ func (r *Reconciler) takeOverPreExistingObject( // No takeover will be performed. // // Note that This will be registered as an (apply) error. 
- return nil, nil, false, fmt.Errorf("the object is already owned by some other sources(s) and co-ownership is disallowed") + return nil, nil, false, fmt.Errorf("the object is already owned by some other sources(s) and co-ownership is disallowed (existingOwnerRefs: %+v)", existingOwnerRefs) } // Check if the object is already owned by Fleet, but the owner is a different AppliedWork diff --git a/pkg/controllers/workapplier/suite_test.go b/pkg/controllers/workapplier/suite_test.go index df8582344..d4011a6a5 100644 --- a/pkg/controllers/workapplier/suite_test.go +++ b/pkg/controllers/workapplier/suite_test.go @@ -19,6 +19,7 @@ package workapplier import ( "context" "flag" + "os" "path/filepath" "strings" "sync" @@ -49,6 +50,10 @@ import ( testv1alpha1 "github.com/kubefleet-dev/kubefleet/test/apis/v1alpha1" ) +const ( + runWithPriorityQueueInCIEnvVarName = "KUBEFLEET_CI_WORK_APPLIER_RUN_WITH_PRIORITY_QUEUE" +) + // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. var ( @@ -89,6 +94,8 @@ var ( ctx context.Context cancel context.CancelFunc wg sync.WaitGroup + + usePriorityQueue = false ) const ( @@ -128,6 +135,11 @@ func (p *parallelizerWithFixedDelay) ParallelizeUntil(ctx context.Context, piece var _ parallelizer.Parallelizer = ¶llelizerWithFixedDelay{} func TestAPIs(t *testing.T) { + if v := os.Getenv(runWithPriorityQueueInCIEnvVarName); len(v) != 0 { + t.Log("Priority queue is enabled for the integration tests") + usePriorityQueue = true + } + RegisterFailHandler(Fail) RunSpecs(t, "Work Applier Integration Test Suite") @@ -163,6 +175,8 @@ func setupResources() { Expect(hubClient.Create(ctx, ns4)).To(Succeed()) } +// Note: each Ginkgo process must do the same setup; unlike our E2E tests, the integration +// tests uses in-memory testing environments, and as a result cannot be shared across processes. 
var _ = BeforeSuite(func() { ctx, cancel = context.WithCancel(context.TODO()) @@ -171,7 +185,9 @@ var _ = BeforeSuite(func() { klog.InitFlags(fs) Expect(fs.Parse([]string{"--v", "5", "-add_dir_header", "true"})).Should(Succeed()) - klog.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + logger := zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)) + klog.SetLogger(logger) + ctrl.SetLogger(logger) By("Bootstrapping test environments") hubEnv = &envtest.Environment{ @@ -307,10 +323,10 @@ var _ = BeforeSuite(func() { maxConcurrentReconciles, parallelizer.NewParallelizer(workerCount), 30*time.Second, - nil, // Use the default backoff rate limiter. - false, // Disable priority queueing. - nil, // Use the default priority linear equation coefficients. - nil, // Use the default priority linear equation coefficients. + nil, // Use the default backoff rate limiter. + usePriorityQueue, + nil, // Use the default priority linear equation coefficients. + nil, // Use the default priority linear equation coefficients. ) Expect(workApplier1.SetupWithManager(hubMgr1)).To(Succeed()) @@ -359,9 +375,9 @@ var _ = BeforeSuite(func() { parallelizer.NewParallelizer(workerCount), 30*time.Second, superLongExponentialBackoffRateLimiter, - false, // Disable priority queueing. - nil, // Use the default priority linear equation coefficients. - nil, // Use the default priority linear equation coefficients. + usePriorityQueue, + nil, // Use the default priority linear equation coefficients. + nil, // Use the default priority linear equation coefficients. ) Expect(workApplier2.SetupWithManager(hubMgr2)).To(Succeed()) @@ -397,10 +413,10 @@ var _ = BeforeSuite(func() { maxConcurrentReconciles, pWithDelay, 30*time.Second, - nil, // Use the default backoff rate limiter. - false, // Disable priority queueing. - nil, // Use the default priority linear equation coefficients. - nil, // Use the default priority linear equation coefficients. 
+ nil, // Use the default backoff rate limiter. + usePriorityQueue, + nil, // Use the default priority linear equation coefficients. + nil, // Use the default priority linear equation coefficients. ) Expect(workApplier3.SetupWithManager(hubMgr3)).To(Succeed()) @@ -434,10 +450,10 @@ var _ = BeforeSuite(func() { maxConcurrentReconciles, parallelizer.NewParallelizer(workerCount), 30*time.Second, - nil, // Use the default backoff rate limiter. - false, // Disable priority queueing. - nil, // Use the default priority linear equation coefficients. - nil, // Use the default priority linear equation coefficients. + nil, // Use the default backoff rate limiter. + usePriorityQueue, + nil, // Use the default priority linear equation coefficients. + nil, // Use the default priority linear equation coefficients. ) // Due to name conflicts, the third work applier must be set up manually. Expect(workApplier4.SetupWithManager(hubMgr4)).To(Succeed()) diff --git a/pkg/controllers/workapplier/waves_integration_test.go b/pkg/controllers/workapplier/waves_integration_test.go index 3296548ad..16289e67b 100644 --- a/pkg/controllers/workapplier/waves_integration_test.go +++ b/pkg/controllers/workapplier/waves_integration_test.go @@ -1259,7 +1259,9 @@ var _ = Describe("parallel processing with waves", func() { // Ensure that the AppliedWork object has been removed. appliedWorkRemovedActual := appliedWorkRemovedActual(memberClient3, workName) - Eventually(appliedWorkRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object") + // As the cleanup for this test case involves the removal of many resources, we + // use a longer timeout to avoid flakiness. 
+ Eventually(appliedWorkRemovedActual, eventuallyDuration*5, eventuallyInterval).Should(Succeed(), "Failed to remove the AppliedWork object") workRemovedActual := testutilsactuals.WorkObjectRemovedActual(ctx, hubClient, workName, memberReservedNSName3) Eventually(workRemovedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to remove the Work object") diff --git a/test/e2e/setup.sh b/test/e2e/setup.sh index df505b4c4..72f108cdf 100755 --- a/test/e2e/setup.sh +++ b/test/e2e/setup.sh @@ -212,6 +212,7 @@ do --set logVerbosity=5 \ --set namespace=fleet-system \ --set enableV1Beta1APIs=true \ + --set priorityQueue.enabled=true \ --set workApplierRequeueRateLimiterMaxSlowBackoffDelaySeconds=5 \ --set workApplierRequeueRateLimiterMaxFastBackoffDelaySeconds=5 \ --set propertyProvider=$PROPERTY_PROVIDER \ @@ -230,6 +231,7 @@ do --set logVerbosity=5 \ --set namespace=fleet-system \ --set enableV1Beta1APIs=true \ + --set priorityQueue.enabled=true \ --set workApplierRequeueRateLimiterMaxSlowBackoffDelaySeconds=5 \ --set workApplierRequeueRateLimiterMaxFastBackoffDelaySeconds=5 \ --set propertyProvider=$PROPERTY_PROVIDER \ From 8e03e6f156fb419d1cdb09d353733659c95ca0da Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Wed, 11 Feb 2026 14:53:50 -0800 Subject: [PATCH 03/17] fix: move the controller runtime env to 1.33 and fix the script (#435) --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 4b49b106c..5a7bf1c60 100644 --- a/Makefile +++ b/Makefile @@ -66,9 +66,9 @@ GOLANGCI_LINT_BIN := golangci-lint GOLANGCI_LINT := $(abspath $(TOOLS_BIN_DIR)/$(GOLANGCI_LINT_BIN)-$(GOLANGCI_LINT_VER)) # ENVTEST_K8S_VERSION refers to the version of k8s binary assets to be downloaded by envtest binary. 
-ENVTEST_K8S_VERSION = 1.30.0 +ENVTEST_K8S_VERSION = 1.33.0 # ENVTEST_VER is the version of the ENVTEST binary -ENVTEST_VER = v0.0.0-20240317073005-bd9ea79e8d18 +ENVTEST_VER = release-0.22 ENVTEST_BIN := setup-envtest ENVTEST := $(abspath $(TOOLS_BIN_DIR)/$(ENVTEST_BIN)-$(ENVTEST_VER)) From bbc8f44e0468effd9301d0abcfaa9b249f239979 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Thu, 12 Feb 2026 16:08:14 +1100 Subject: [PATCH 04/17] fix: use templated service account name and namespace in hub-agent chart, add helm-push target (#432) --- .github/workflows/release.yml | 13 ++++++++++++- .gitignore | 3 +++ Makefile | 8 ++++++++ charts/hub-agent/templates/deployment.yaml | 2 +- 4 files changed, 24 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 1dda21f26..a74365791 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -77,6 +77,13 @@ jobs: run: | make push + - name: Push Helm charts to OCI registry + env: + HELM_REGISTRY_PASSWORD: ${{ secrets.GITHUB_TOKEN }} + run: | + echo "${HELM_REGISTRY_PASSWORD}" | helm registry login ghcr.io -u ${{ github.actor }} --password-stdin + make helm-push + - name: Verify images run: | echo "✅ Published images:" @@ -84,4 +91,8 @@ jobs: echo " - ${{ env.REGISTRY }}/${{ env.MEMBER_AGENT_IMAGE_NAME }}:${{ env.TAG }}" echo " - ${{ env.REGISTRY }}/${{ env.REFRESH_TOKEN_IMAGE_NAME }}:${{ env.TAG }}" echo "" - echo "📦 Images are now public!" + echo "📦 Published Helm charts:" + echo " - oci://${{ env.REGISTRY }}/hub-agent:${{ env.TAG }}" + echo " - oci://${{ env.REGISTRY }}/member-agent:${{ env.TAG }}" + echo "" + echo "📦 Images and charts are now public!" 
diff --git a/.gitignore b/.gitignore index 828916b76..1afe8c3ea 100644 --- a/.gitignore +++ b/.gitignore @@ -35,3 +35,6 @@ ut-coverage.xml .vscode/ .qoder/ + +# Helm chart packaging +.helm-packages/ diff --git a/Makefile b/Makefile index 5a7bf1c60..4d8b0ed21 100644 --- a/Makefile +++ b/Makefile @@ -233,6 +233,14 @@ BUILDKIT_VERSION ?= v0.18.1 push: ## Build and push all Docker images $(MAKE) OUTPUT_TYPE="type=registry" docker-build-hub-agent docker-build-member-agent docker-build-refresh-token +.PHONY: helm-push +helm-push: ## Package and push Helm charts to OCI registry + helm package charts/hub-agent --version $(TAG) --app-version $(TAG) --destination .helm-packages + helm package charts/member-agent --version $(TAG) --app-version $(TAG) --destination .helm-packages + helm push .helm-packages/hub-agent-$(TAG).tgz oci://$(REGISTRY) + helm push .helm-packages/member-agent-$(TAG).tgz oci://$(REGISTRY) + rm -rf .helm-packages + # By default, docker buildx create will pull image moby/buildkit:buildx-stable-1 and hit the too many requests error .PHONY: docker-buildx-builder # Note (chenyu1): the step below sets up emulation for building/running non-native binaries on the host. The original diff --git a/charts/hub-agent/templates/deployment.yaml b/charts/hub-agent/templates/deployment.yaml index 9d652be57..5ce8dadbb 100644 --- a/charts/hub-agent/templates/deployment.yaml +++ b/charts/hub-agent/templates/deployment.yaml @@ -30,7 +30,7 @@ spec: - --enable-guard-rail={{ .Values.enableGuardRail }} - --enable-workload={{ .Values.enableWorkload }} - --use-cert-manager={{ .Values.useCertManager }} - - --whitelisted-users=system:serviceaccount:fleet-system:hub-agent-sa + - --whitelisted-users=system:serviceaccount:{{ .Values.namespace }}:{{ include "hub-agent.fullname" . 
}}-sa - --webhook-client-connection-type={{.Values.webhookClientConnectionType}} - --v={{ .Values.logVerbosity }} - -add_dir_header From cb79166759372929c73619ac92c7f968ef42d768 Mon Sep 17 00:00:00 2001 From: Ryan Zhang Date: Thu, 12 Feb 2026 13:35:48 -0800 Subject: [PATCH 05/17] fix: change the error msg reg exp (#439) change the error msg reg exp Signed-off-by: Ryan Zhang --- test/apis/placement/v1beta1/api_validation_integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/apis/placement/v1beta1/api_validation_integration_test.go b/test/apis/placement/v1beta1/api_validation_integration_test.go index 44285f00c..904a691e6 100644 --- a/test/apis/placement/v1beta1/api_validation_integration_test.go +++ b/test/apis/placement/v1beta1/api_validation_integration_test.go @@ -1409,7 +1409,7 @@ var _ = Describe("Test placement v1beta1 API validation", func() { err := hubClient.Create(ctx, &strategy) var statusErr *k8sErrors.StatusError Expect(errors.As(err, &statusErr)).To(BeTrue(), fmt.Sprintf("Create updateRunStrategy call produced error %s. 
Error type wanted is %s.", reflect.TypeOf(err), reflect.TypeOf(&k8sErrors.StatusError{}))) - Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("Too long: may not be longer than 63")) + Expect(statusErr.ErrStatus.Message).Should(MatchRegexp("Too long: may not be more than 63 bytes")) }) It("Should deny creation of ClusterStagedUpdateStrategy with invalid stage config - stage name with invalid characters", func() { From e656b46c195bd87bd4d8905701f3e3f4b9558a7d Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Thu, 12 Feb 2026 15:20:16 -0800 Subject: [PATCH 06/17] feat: Enable Helm chart publishing to GHCR with version synchronization (#436) --- .github/workflows/chart.yml | 62 +++++- charts/README.md | 179 ++++++++++++++++++ charts/hub-agent/README.md | 77 +++++++- charts/hub-agent/templates/crds/crps.yaml | 8 +- .../crds/internalmemberclusters.yaml | 8 +- .../templates/crds/memberclusters.yaml | 8 +- charts/hub-agent/templates/crds/works.yaml | 8 +- charts/member-agent/README.md | 41 +++- .../templates/crds/appliedworks.yaml | 8 +- 9 files changed, 361 insertions(+), 38 deletions(-) create mode 100644 charts/README.md diff --git a/.github/workflows/chart.yml b/.github/workflows/chart.yml index f3022c580..323075d49 100644 --- a/.github/workflows/chart.yml +++ b/.github/workflows/chart.yml @@ -5,7 +5,7 @@ on: branches: - main paths: - - ".github/workflows/chart.yaml" + - ".github/workflows/chart.yml" - "charts/**" create: # Publish semver tags as releases. 
@@ -13,19 +13,73 @@ on: permissions: contents: write + packages: write + +env: + REGISTRY: ghcr.io jobs: - deploy: + publish-github-pages: runs-on: ubuntu-latest steps: - uses: actions/checkout@v6.0.2 with: submodules: true fetch-depth: 0 - - name: Publish Helm chart + - name: Publish Helm chart to GitHub Pages uses: stefanprodan/helm-gh-pages@v1.7.0 with: token: ${{ secrets.GITHUB_TOKEN }} charts_dir: charts target_dir: charts - linting: off + linting: on + + publish-oci: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v6.0.2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef + with: + registry: ${{ env.REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Package and push Helm charts to GHCR + run: | + set -euo pipefail + + # Convert repository name to lowercase for OCI registry + REPO_LOWER=$(echo "${{ github.repository }}" | tr '[:upper:]' '[:lower:]') + + # Determine version to use + if [[ "${{ github.ref }}" == refs/tags/* ]]; then + # Use release tag as version (strip 'v' prefix) + CHART_VERSION=${GITHUB_REF#refs/tags/v} + echo "Using release tag version: ${CHART_VERSION}" + else + # Use version from Chart.yaml for non-tag pushes + CHART_VERSION=$(grep '^version:' charts/hub-agent/Chart.yaml | awk '{print $2}') + echo "Using Chart.yaml version: ${CHART_VERSION}" + fi + + # Package and push hub-agent chart + echo "📦 Packaging hub-agent chart..." + helm package charts/hub-agent --version ${CHART_VERSION} --app-version ${CHART_VERSION} + + echo "🚀 Pushing hub-agent to OCI registry..." + helm push hub-agent-${CHART_VERSION}.tgz oci://${{ env.REGISTRY }}/${REPO_LOWER}/charts + + # Package and push member-agent chart + echo "📦 Packaging member-agent chart..." + helm package charts/member-agent --version ${CHART_VERSION} --app-version ${CHART_VERSION} + + echo "🚀 Pushing member-agent to OCI registry..." 
+ helm push member-agent-${CHART_VERSION}.tgz oci://${{ env.REGISTRY }}/${REPO_LOWER}/charts + + echo "" + echo "✅ Helm charts published to OCI registry!" + echo "📍 Hub Agent: oci://${{ env.REGISTRY }}/${REPO_LOWER}/charts/hub-agent:${CHART_VERSION}" + echo "📍 Member Agent: oci://${{ env.REGISTRY }}/${REPO_LOWER}/charts/member-agent:${CHART_VERSION}" diff --git a/charts/README.md b/charts/README.md new file mode 100644 index 000000000..ab47fd547 --- /dev/null +++ b/charts/README.md @@ -0,0 +1,179 @@ +# KubeFleet Helm Charts + +This directory contains Helm charts for deploying KubeFleet components. + +## Available Charts + +- **hub-agent**: The central controller that runs on the hub cluster, managing placement decisions, scheduling, and cluster inventory +- **member-agent**: The agent that runs on each member cluster, applying workloads and reporting cluster status + +## Chart Versioning + +**Important:** Chart versions match the KubeFleet release versions. When a KubeFleet release is tagged (e.g., `v0.2.1`), the Helm charts are published with the same version (`0.2.1`). + +**Example:** To install KubeFleet v0.2.1, use: +```bash +helm install hub-agent oci://ghcr.io/kubefleet-dev/kubefleet/charts/hub-agent --version 0.2.1 +``` + +This ensures consistency between the application version and the chart version, making it easy to know which chart version to use with each KubeFleet release. + +## Using Published Charts + +KubeFleet Helm charts are automatically published to both GitHub Container Registry (GHCR) as OCI artifacts and GitHub Pages as a traditional Helm repository. 
+ +### Option 1: OCI Registry (Recommended) + +Install directly from GitHub Container Registry without adding a repository: + +#### Hub Agent + +```bash +# Install hub-agent on the hub cluster (replace VERSION with your desired release) +helm install hub-agent oci://ghcr.io/kubefleet-dev/kubefleet/charts/hub-agent \ + --version VERSION \ + --namespace fleet-system \ + --create-namespace +``` + +#### Member Agent + +```bash +# Install member-agent on each member cluster (replace VERSION with your desired release) +helm install member-agent oci://ghcr.io/kubefleet-dev/kubefleet/charts/member-agent \ + --version VERSION \ + --namespace fleet-system \ + --create-namespace +``` + +### Option 2: Traditional Helm Repository + +Add the repository and install from it: + +```bash +# Add the KubeFleet Helm repository +helm repo add kubefleet https://kubefleet-dev.github.io/kubefleet/charts + +# Update your local Helm chart repository cache +helm repo update + +# Install hub-agent +helm install hub-agent kubefleet/hub-agent \ + --namespace fleet-system \ + --create-namespace + +# Install member-agent +helm install member-agent kubefleet/member-agent \ + --namespace fleet-system \ + --create-namespace +``` + +### Installing Specific Versions + +#### OCI Registry + +```bash +# Install a specific version from OCI registry (e.g., v0.2.1 release) +helm install hub-agent oci://ghcr.io/kubefleet-dev/kubefleet/charts/hub-agent \ + --version 0.2.1 \ + --namespace fleet-system \ + --create-namespace +``` + +#### Traditional Repository + +```bash +# List available versions +helm search repo kubefleet --versions + +# Install a specific version (e.g., v0.2.1 release) +helm install hub-agent kubefleet/hub-agent \ + --version 0.2.1 \ + --namespace fleet-system \ + --create-namespace +``` + +### Upgrading Charts + +#### OCI Registry + +```bash +# Upgrade to a specific version (e.g., v0.2.1) +helm upgrade hub-agent oci://ghcr.io/kubefleet-dev/kubefleet/charts/hub-agent \ + --version 0.2.1 \ + 
--namespace fleet-system + +helm upgrade member-agent oci://ghcr.io/kubefleet-dev/kubefleet/charts/member-agent \ + --version 0.2.1 \ + --namespace fleet-system +``` + +#### Traditional Repository + +```bash +# Upgrade to latest version +helm upgrade hub-agent kubefleet/hub-agent --namespace fleet-system +helm upgrade member-agent kubefleet/member-agent --namespace fleet-system +``` + +## Chart Publishing + +Charts are automatically published to both locations when: +- Changes are pushed to the `main` branch affecting chart files +- A version tag (e.g., `v1.0.0`) is created + +**Published Locations:** +- **OCI Registry**: `oci://ghcr.io/kubefleet-dev/kubefleet/charts/{chart-name}` +- **GitHub Pages**: `https://kubefleet-dev.github.io/kubefleet/charts` + +The publishing workflow is defined in `.github/workflows/chart.yml`. + +## Development + +### Local Installation + +For development and testing, you can install charts directly from the local repository: + +```bash +# Install from local path +helm install hub-agent ./charts/hub-agent --namespace fleet-system --create-namespace +helm install member-agent ./charts/member-agent --namespace fleet-system --create-namespace +``` + +### Linting + +```bash +# Lint a chart +helm lint charts/hub-agent +helm lint charts/member-agent +``` + +### Packaging + +```bash +# Package charts locally +helm package charts/hub-agent +helm package charts/member-agent +``` + +## Chart Documentation + +For detailed documentation on each chart including configuration parameters, see: +- [Hub Agent Chart](./hub-agent/README.md) +- [Member Agent Chart](./member-agent/README.md) + +## Contributing + +When making changes to charts: +1. Update the chart version in `Chart.yaml` following [Semantic Versioning](https://semver.org/) +2. Update the `appVersion` if the application version changes +3. Run `helm lint` to validate your changes +4. Update the chart's README.md with any new parameters or changes +5. 
Test the chart installation locally before submitting a PR + +## Support + +For issues or questions about KubeFleet Helm charts, please: +- Check the [main documentation](https://kubefleet.dev/docs/) +- Review chart-specific READMEs +- Open an issue in the [GitHub repository](https://github.com/kubefleet-dev/kubefleet/issues) diff --git a/charts/hub-agent/README.md b/charts/hub-agent/README.md index 5ab3221eb..92b7d5854 100644 --- a/charts/hub-agent/README.md +++ b/charts/hub-agent/README.md @@ -1,8 +1,37 @@ # Hub agent controller Helm Chart +## Chart Versioning + +Chart versions match the KubeFleet release versions. For example, to install KubeFleet v0.2.1, use chart version `0.2.1`. + ## Install Chart -### Default Installation (Self-Signed Certificates) +### Using Published Chart (Recommended) + +The hub-agent chart is published to both GitHub Container Registry (OCI) and GitHub Pages. + +#### Option 1: OCI Registry (Recommended) + +```console +# Install directly from OCI registry (replace VERSION with the desired release) +helm install hub-agent oci://ghcr.io/kubefleet-dev/kubefleet/charts/hub-agent \ + --version VERSION \ + --namespace fleet-system \ + --create-namespace +``` + +#### Option 2: Traditional Helm Repository + +```console +# Add the KubeFleet Helm repository +helm repo add kubefleet https://kubefleet-dev.github.io/kubefleet/charts +helm repo update + +# Install hub-agent (specify --version to pin to a specific release) +helm install hub-agent kubefleet/hub-agent --namespace fleet-system --create-namespace +``` + +### Local Installation from Source ```console # Helm install with fleet-system namespace already created @@ -23,8 +52,18 @@ helm install cert-manager jetstack/cert-manager \ --create-namespace \ --set crds.enabled=true -# Then install hub-agent with cert-manager enabled -helm install hub-agent ./charts/hub-agent --set useCertManager=true --set enableWorkload=true --set enableWebhook=true +# Then install hub-agent with cert-manager enabled 
(OCI, specify VERSION) +helm install hub-agent oci://ghcr.io/kubefleet-dev/kubefleet/charts/hub-agent \ + --version VERSION \ + --set useCertManager=true \ + --set enableWorkload=true \ + --set enableWebhook=true + +# Or using traditional repository +helm install hub-agent kubefleet/hub-agent \ + --set useCertManager=true \ + --set enableWorkload=true \ + --set enableWebhook=true ``` This configures cert-manager to manage webhook certificates. @@ -32,7 +71,13 @@ This configures cert-manager to manage webhook certificates. ## Upgrade Chart ```console -helm upgrade hub-agent ./charts/hubagent/ --namespace fleet-system --create-namespace +# Using OCI registry (specify VERSION) +helm upgrade hub-agent oci://ghcr.io/kubefleet-dev/kubefleet/charts/hub-agent \ + --version VERSION \ + --namespace fleet-system + +# Using traditional repository +helm upgrade hub-agent kubefleet/hub-agent --namespace fleet-system ``` _See [parameters](#parameters) below._ @@ -105,8 +150,18 @@ helm install cert-manager jetstack/cert-manager \ --create-namespace \ --set crds.enabled=true -# Then install hub-agent with cert-manager enabled -helm install hub-agent ./charts/hub-agent --set useCertManager=true --set enableWorkload=true --set enableWebhook=true +# Then install hub-agent with cert-manager enabled (OCI, specify VERSION) +helm install hub-agent oci://ghcr.io/kubefleet-dev/kubefleet/charts/hub-agent \ + --version VERSION \ + --set useCertManager=true \ + --set enableWorkload=true \ + --set enableWebhook=true + +# Or using traditional repository +helm install hub-agent kubefleet/hub-agent \ + --set useCertManager=true \ + --set enableWorkload=true \ + --set enableWebhook=true ``` The `webhookCertSecretName` parameter specifies the Secret name for the certificate: @@ -116,7 +171,15 @@ The `webhookCertSecretName` parameter specifies the Secret name for the certific Example with custom secret name: ```console -helm install hub-agent ./charts/hub-agent \ +# Using OCI registry (specify 
VERSION) +helm install hub-agent oci://ghcr.io/kubefleet-dev/kubefleet/charts/hub-agent \ + --version VERSION \ + --set useCertManager=true \ + --set enableWorkload=true \ + --set webhookCertSecretName=my-webhook-secret + +# Using traditional repository +helm install hub-agent kubefleet/hub-agent \ --set useCertManager=true \ --set enableWorkload=true \ --set webhookCertSecretName=my-webhook-secret diff --git a/charts/hub-agent/templates/crds/crps.yaml b/charts/hub-agent/templates/crds/crps.yaml index f7a1be908..8e92ec2c5 100644 --- a/charts/hub-agent/templates/crds/crps.yaml +++ b/charts/hub-agent/templates/crds/crps.yaml @@ -1,4 +1,4 @@ -{{ $files := .Files }} -{{ if .Values.enableV1Beta1APIs }} - {{ $files.Get "crdbases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml" }} -{{ end }} +{{- $files := .Files }} +{{- if .Values.enableV1Beta1APIs }} +{{ $files.Get "crdbases/placement.kubernetes-fleet.io_clusterresourceplacements.yaml" }} +{{- end }} diff --git a/charts/hub-agent/templates/crds/internalmemberclusters.yaml b/charts/hub-agent/templates/crds/internalmemberclusters.yaml index 685e081a9..f036fa06f 100644 --- a/charts/hub-agent/templates/crds/internalmemberclusters.yaml +++ b/charts/hub-agent/templates/crds/internalmemberclusters.yaml @@ -1,4 +1,4 @@ -{{ $files := .Files }} -{{ if .Values.enableV1Beta1APIs }} - {{ $files.Get "crdbases/cluster.kubernetes-fleet.io_internalmemberclusters.yaml" }} -{{ end }} +{{- $files := .Files }} +{{- if .Values.enableV1Beta1APIs }} +{{ $files.Get "crdbases/cluster.kubernetes-fleet.io_internalmemberclusters.yaml" }} +{{- end }} diff --git a/charts/hub-agent/templates/crds/memberclusters.yaml b/charts/hub-agent/templates/crds/memberclusters.yaml index 6d8ad48c5..a77392097 100644 --- a/charts/hub-agent/templates/crds/memberclusters.yaml +++ b/charts/hub-agent/templates/crds/memberclusters.yaml @@ -1,4 +1,4 @@ -{{ $files := .Files }} -{{ if .Values.enableV1Beta1APIs }} - {{ $files.Get 
"crdbases/cluster.kubernetes-fleet.io_memberclusters.yaml" }} -{{ end }} +{{- $files := .Files }} +{{- if .Values.enableV1Beta1APIs }} +{{ $files.Get "crdbases/cluster.kubernetes-fleet.io_memberclusters.yaml" }} +{{- end }} diff --git a/charts/hub-agent/templates/crds/works.yaml b/charts/hub-agent/templates/crds/works.yaml index d1b245b7c..ad3fa6924 100644 --- a/charts/hub-agent/templates/crds/works.yaml +++ b/charts/hub-agent/templates/crds/works.yaml @@ -1,4 +1,4 @@ -{{ $files := .Files }} -{{ if .Values.enableV1Beta1APIs }} - {{ $files.Get "crdbases/placement.kubernetes-fleet.io_works.yaml" }} -{{ end }} +{{- $files := .Files }} +{{- if .Values.enableV1Beta1APIs }} +{{ $files.Get "crdbases/placement.kubernetes-fleet.io_works.yaml" }} +{{- end }} diff --git a/charts/member-agent/README.md b/charts/member-agent/README.md index db1d894c5..027f95357 100644 --- a/charts/member-agent/README.md +++ b/charts/member-agent/README.md @@ -1,13 +1,37 @@ # Azure Fleet Member Agent Helm Chart -## Get Repo +## Chart Versioning + +Chart versions match the KubeFleet release versions. For example, to install KubeFleet v0.2.1, use chart version `0.2.1`. + +## Install Chart + +### Using Published Chart (Recommended) + +The member-agent chart is published to both GitHub Container Registry (OCI) and GitHub Pages. 
+ +#### Option 1: OCI Registry (Recommended) ```console -helm repo add member-agent https://azure.github.io/fleet/charts/member-agent +# Install directly from OCI registry (replace VERSION with the desired release) +helm install member-agent oci://ghcr.io/kubefleet-dev/kubefleet/charts/member-agent \ + --version VERSION \ + --namespace fleet-system \ + --create-namespace +``` + +#### Option 2: Traditional Helm Repository + +```console +# Add the KubeFleet Helm repository +helm repo add kubefleet https://kubefleet-dev.github.io/kubefleet/charts helm repo update + +# Install member-agent (specify --version to pin to a specific release) +helm install member-agent kubefleet/member-agent --namespace fleet-system --create-namespace ``` -## Install Chart +### From Local Source ```console # Go to `charts` folder inside the repo @@ -21,10 +45,13 @@ _See [helm install](https://helm.sh/docs/helm/helm_install/) for command documen ## Upgrade Chart ```console -# Go to `charts` folder inside the repo -cd /fleet/charts -# Helm upgrade -helm upgrade member-agent member-agent/ --namespace fleet-system +# Using OCI registry (specify VERSION) +helm upgrade member-agent oci://ghcr.io/kubefleet-dev/kubefleet/charts/member-agent \ + --version VERSION \ + --namespace fleet-system + +# Using traditional repository +helm upgrade member-agent kubefleet/member-agent --namespace fleet-system ``` ## Parameters diff --git a/charts/member-agent/templates/crds/appliedworks.yaml b/charts/member-agent/templates/crds/appliedworks.yaml index 5d0bbc742..90c84adb0 100644 --- a/charts/member-agent/templates/crds/appliedworks.yaml +++ b/charts/member-agent/templates/crds/appliedworks.yaml @@ -1,4 +1,4 @@ -{{ $files := .Files }} -{{ if .Values.enableV1Beta1APIs }} - {{ $files.Get "crdbases/placement.kubernetes-fleet.io_appliedworks.yaml" }} -{{ end }} +{{- $files := .Files }} +{{- if .Values.enableV1Beta1APIs }} +{{ $files.Get "crdbases/placement.kubernetes-fleet.io_appliedworks.yaml" }} +{{- end }} From 
0d14e4779cb59e5af61be8eb07b340e1c77532a5 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Fri, 13 Feb 2026 13:50:11 -0800 Subject: [PATCH 07/17] chore: bump Go version to 1.24.13 to fix CVE (#438) * Initial plan * feat: bump Go version to 1.24.13 Co-authored-by: weng271190436 <6925089+weng271190436@users.noreply.github.com> --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: weng271190436 <6925089+weng271190436@users.noreply.github.com> Co-authored-by: Wei Weng --- .github/workflows/ci.yml | 2 +- .github/workflows/code-lint.yml | 2 +- .github/workflows/release.yml | 2 +- .github/workflows/trivy.yml | 2 +- .github/workflows/upgrade.yml | 2 +- .golangci.yml | 2 +- docker/hub-agent.Dockerfile | 2 +- docker/member-agent.Dockerfile | 2 +- docker/refresh-token.Dockerfile | 2 +- go.mod | 2 +- 10 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 562aa6375..bde484de2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,7 +13,7 @@ on: paths-ignore: [docs/**, "**.md", "**.mdx", "**.png", "**.jpg"] env: - GO_VERSION: '1.24.12' + GO_VERSION: '1.24.13' CERT_MANAGER_VERSION: 'v1.16.2' jobs: diff --git a/.github/workflows/code-lint.yml b/.github/workflows/code-lint.yml index 65908017b..8bf3c0614 100644 --- a/.github/workflows/code-lint.yml +++ b/.github/workflows/code-lint.yml @@ -14,7 +14,7 @@ on: env: # Common versions - GO_VERSION: '1.24.12' + GO_VERSION: '1.24.13' jobs: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a74365791..fba62acef 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,7 +20,7 @@ env: HUB_AGENT_IMAGE_NAME: hub-agent MEMBER_AGENT_IMAGE_NAME: member-agent REFRESH_TOKEN_IMAGE_NAME: refresh-token - GO_VERSION: '1.24.12' + GO_VERSION: '1.24.13' jobs: export-registry: diff --git a/.github/workflows/trivy.yml 
b/.github/workflows/trivy.yml index 3cf096830..b6dc54430 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -18,7 +18,7 @@ env: MEMBER_AGENT_IMAGE_NAME: member-agent REFRESH_TOKEN_IMAGE_NAME: refresh-token - GO_VERSION: '1.24.12' + GO_VERSION: '1.24.13' jobs: export-registry: diff --git a/.github/workflows/upgrade.yml b/.github/workflows/upgrade.yml index 318a531aa..2d6911bff 100644 --- a/.github/workflows/upgrade.yml +++ b/.github/workflows/upgrade.yml @@ -17,7 +17,7 @@ on: paths-ignore: [docs/**, "**.md", "**.mdx", "**.png", "**.jpg"] env: - GO_VERSION: '1.24.12' + GO_VERSION: '1.24.13' jobs: detect-noop: diff --git a/.golangci.yml b/.golangci.yml index d8c09541b..556731073 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,6 +1,6 @@ run: timeout: 15m - go: '1.24.12' + go: '1.24.13' linters-settings: stylecheck: diff --git a/docker/hub-agent.Dockerfile b/docker/hub-agent.Dockerfile index faa7dfc6e..1018a87a2 100644 --- a/docker/hub-agent.Dockerfile +++ b/docker/hub-agent.Dockerfile @@ -1,5 +1,5 @@ # Build the hubagent binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.12 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.13 AS builder ARG GOOS=linux ARG GOARCH=amd64 diff --git a/docker/member-agent.Dockerfile b/docker/member-agent.Dockerfile index 43075a67b..3a58cf33f 100644 --- a/docker/member-agent.Dockerfile +++ b/docker/member-agent.Dockerfile @@ -1,5 +1,5 @@ # Build the memberagent binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.12 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.13 AS builder ARG GOOS=linux ARG GOARCH=amd64 diff --git a/docker/refresh-token.Dockerfile b/docker/refresh-token.Dockerfile index b79d3a389..31a492e86 100644 --- a/docker/refresh-token.Dockerfile +++ b/docker/refresh-token.Dockerfile @@ -1,5 +1,5 @@ # Build the refreshtoken binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.12 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.13 AS 
builder ARG GOOS="linux" ARG GOARCH="amd64" diff --git a/go.mod b/go.mod index 543ca7c84..b46f8dc38 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/kubefleet-dev/kubefleet -go 1.24.12 +go 1.24.13 require ( github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 From fb200207d5a3b6052b5e65f7305f7be849d962b3 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Fri, 13 Feb 2026 13:51:47 -0800 Subject: [PATCH 08/17] fix: e2e flaky test: Ensure CRO snapshot exists before CRP creation to prevent placement race (#441) --- test/e2e/placement_cro_test.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/test/e2e/placement_cro_test.go b/test/e2e/placement_cro_test.go index 0419e29c8..698c7428b 100644 --- a/test/e2e/placement_cro_test.go +++ b/test/e2e/placement_cro_test.go @@ -604,6 +604,7 @@ var _ = Context("creating clusterResourceOverride with and resource becomes inva var _ = Context("creating clusterResourceOverride with delete rules for one cluster", Ordered, func() { crpName := fmt.Sprintf(crpNameTemplate, GinkgoParallelProcess()) croName := fmt.Sprintf(croNameTemplate, GinkgoParallelProcess()) + croSnapShotName := fmt.Sprintf(placementv1beta1.OverrideSnapshotNameFmt, croName, 0) BeforeAll(func() { By("creating work resources") @@ -659,6 +660,12 @@ var _ = Context("creating clusterResourceOverride with delete rules for one clus By(fmt.Sprintf("creating clusterResourceOverride %s", croName)) Expect(hubClient.Create(ctx, cro)).To(Succeed(), "Failed to create clusterResourceOverride %s", croName) + // This is to make sure the CRO snapshot is created before the CRP. + Eventually(func() error { + croSnap := &placementv1beta1.ClusterResourceOverrideSnapshot{} + return hubClient.Get(ctx, types.NamespacedName{Name: croSnapShotName}, croSnap) + }, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to create CRO snapshot %s", croSnapShotName) + // Create the CRP. 
createCRP(crpName) }) @@ -697,14 +704,9 @@ var _ = Context("creating clusterResourceOverride with delete rules for one clus It("should not place the selected resources on the member clusters that are deleted", func() { memberCluster := allMemberClusters[2] - Consistently(func() bool { - ns := &corev1.Namespace{} - workNamespaceName := fmt.Sprintf(workNamespaceNameTemplate, GinkgoParallelProcess()) - if err := memberCluster.KubeClient.Get(ctx, types.NamespacedName{Name: workNamespaceName}, ns); err != nil { - return errors.IsNotFound(err) - } - return false - }, consistentlyDuration, eventuallyInterval).Should(BeTrue(), "Failed to delete work resources on member cluster %s", memberCluster.ClusterName) + // With CRO snapshot ready before CRP creation, resources should never be placed. + workResourcesRemovedActual := workNamespaceRemovedFromClusterActual(memberCluster) + Consistently(workResourcesRemovedActual, consistentlyDuration, consistentlyInterval).Should(Succeed(), "Work resources should not be placed on member cluster %s", memberCluster.ClusterName) }) }) From f0af0d6e806489eaae309fe25b070a99b999bc72 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 14 Feb 2026 11:25:35 -0800 Subject: [PATCH 09/17] chore: bump docker/login-action from 3.6.0 to 3.7.0 (#423) --- .github/workflows/release.yml | 2 +- .github/workflows/trivy.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index fba62acef..c2671e9bc 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -67,7 +67,7 @@ jobs: uses: actions/checkout@v6.0.2 - name: Login to ghcr.io - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef + uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 with: registry: ghcr.io username: ${{ github.actor }} diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml index 
b6dc54430..11aeafe4b 100644 --- a/.github/workflows/trivy.yml +++ b/.github/workflows/trivy.yml @@ -47,7 +47,7 @@ jobs: uses: actions/checkout@v6.0.2 - name: Login to ${{ env.REGISTRY }} - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef + uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} From d14f099a6482afab313a53c18d44552d7fc3d93b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 14 Feb 2026 11:25:56 -0800 Subject: [PATCH 10/17] chore: bump step-security/harden-runner from 2.14.0 to 2.14.1 (#422) Bumps [step-security/harden-runner](https://github.com/step-security/harden-runner) from 2.14.0 to 2.14.1. - [Release notes](https://github.com/step-security/harden-runner/releases) - [Commits](https://github.com/step-security/harden-runner/compare/20cf305ff2072d973412fa9b1e3a4f227bda3c76...e3f713f2d8f53843e71c69a996d56f51aa9adfb9) --- updated-dependencies: - dependency-name: step-security/harden-runner dependency-version: 2.14.1 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Ryan Zhang Co-authored-by: Wei Weng --- .github/workflows/codespell.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index aac7f666b..f1530425b 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 with: egress-policy: audit From 481dccb993aebc5ddf287a10dcc3fbeef0f30a8c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Feb 2026 11:13:23 -0500 Subject: [PATCH 11/17] chore: bump step-security/harden-runner from 2.14.1 to 2.14.2 (#445) Bumps [step-security/harden-runner](https://github.com/step-security/harden-runner) from 2.14.1 to 2.14.2. - [Release notes](https://github.com/step-security/harden-runner/releases) - [Commits](https://github.com/step-security/harden-runner/compare/e3f713f2d8f53843e71c69a996d56f51aa9adfb9...5ef0c079ce82195b2a36a210272d6b661572d83e) --- updated-dependencies: - dependency-name: step-security/harden-runner dependency-version: 2.14.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codespell.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index f1530425b..1bd5a15ae 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden Runner - uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 + uses: step-security/harden-runner@5ef0c079ce82195b2a36a210272d6b661572d83e # v2.14.2 with: egress-policy: audit From 25d2bf89a4e3ce6881b7ed20b55f8532597093b7 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Tue, 17 Feb 2026 08:23:16 -0800 Subject: [PATCH 12/17] fix: Pin staticcheck to v0.6.1 for Go 1.24.13 compatibility (#447) * Initial plan * fix: pin staticcheck to v0.6.1 for Go 1.24.13 compatibility The master branch of staticcheck now requires Go 1.25.0+, but this project uses Go 1.24.13. Pin to v0.6.1 which is the latest version compatible with Go 1.24.13. 
Fixes staticcheck CI failure: "requires go >= 1.25.0 (running go 1.24.13)" Co-authored-by: weng271190436 <6925089+weng271190436@users.noreply.github.com> --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: weng271190436 <6925089+weng271190436@users.noreply.github.com> --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4d8b0ed21..c3cfb251c 100644 --- a/Makefile +++ b/Makefile @@ -53,7 +53,7 @@ CONTROLLER_GEN_VER := v0.16.0 CONTROLLER_GEN_BIN := controller-gen CONTROLLER_GEN := $(abspath $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER)) -STATICCHECK_VER := master +STATICCHECK_VER := v0.6.1 STATICCHECK_BIN := staticcheck STATICCHECK := $(abspath $(TOOLS_BIN_DIR)/$(STATICCHECK_BIN)-$(STATICCHECK_VER)) From 168e3f384c9a5f8a9bd69418d077110a4d2b6278 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Feb 2026 11:39:04 -0500 Subject: [PATCH 13/17] chore: bump docker/login-action from 3.6.0 to 3.7.0 (#446) Bumps [docker/login-action](https://github.com/docker/login-action) from 3.6.0 to 3.7.0. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/v3.6.0...c94ce9fb468520275223c153574b00df6fe4bcc9) --- updated-dependencies: - dependency-name: docker/login-action dependency-version: 3.7.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Wei Weng --- .github/workflows/chart.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/chart.yml b/.github/workflows/chart.yml index 323075d49..bf911eeae 100644 --- a/.github/workflows/chart.yml +++ b/.github/workflows/chart.yml @@ -41,7 +41,7 @@ jobs: uses: actions/checkout@v6.0.2 - name: Login to GitHub Container Registry - uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef + uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} From b1bd8c48baa73f09b2c33775a54d60aaf9910236 Mon Sep 17 00:00:00 2001 From: Britania Rodriguez Reyes <145056127+britaniar@users.noreply.github.com> Date: Tue, 17 Feb 2026 10:27:57 -0800 Subject: [PATCH 14/17] feat: Create struct for Resource Snapshot Creation to use across controllers (#429) --- cmd/hubagent/workload/setup.go | 15 +- pkg/controllers/placement/controller.go | 458 +--- pkg/controllers/placement/controller_test.go | 2188 ---------------- pkg/controllers/placement/suite_test.go | 2 + .../controller/resource_snapshot_resolver.go | 483 +++- .../resource_snapshot_resolver_test.go | 2248 ++++++++++++++++- 6 files changed, 2726 insertions(+), 2668 deletions(-) diff --git a/cmd/hubagent/workload/setup.go b/cmd/hubagent/workload/setup.go index 00064faf6..3d8450ee1 100644 --- a/cmd/hubagent/workload/setup.go +++ b/cmd/hubagent/workload/setup.go @@ -164,14 +164,15 @@ func SetupControllers(ctx context.Context, wg *sync.WaitGroup, mgr ctrl.Manager, SkippedNamespaces: skippedNamespaces, EnableWorkload: opts.EnableWorkload, } + resourceSnapshotResolver := controller.NewResourceSnapshotResolver(mgr.GetClient(), mgr.GetScheme()) + resourceSnapshotResolver.Config = controller.NewResourceSnapshotConfig(opts.ResourceSnapshotCreationMinimumInterval, 
opts.ResourceChangesCollectionDuration) pc := &placement.Reconciler{ - Client: mgr.GetClient(), - Recorder: mgr.GetEventRecorderFor(placementControllerName), - Scheme: mgr.GetScheme(), - UncachedReader: mgr.GetAPIReader(), - ResourceSelectorResolver: resourceSelectorResolver, - ResourceSnapshotCreationMinimumInterval: opts.ResourceSnapshotCreationMinimumInterval, - ResourceChangesCollectionDuration: opts.ResourceChangesCollectionDuration, + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor(placementControllerName), + Scheme: mgr.GetScheme(), + UncachedReader: mgr.GetAPIReader(), + ResourceSelectorResolver: resourceSelectorResolver, + ResourceSnapshotResolver: *resourceSnapshotResolver, } rateLimiter := options.DefaultControllerRateLimiter(opts.RateLimiterOpts) diff --git a/pkg/controllers/placement/controller.go b/pkg/controllers/placement/controller.go index bc53ee8dd..7001ccf9a 100644 --- a/pkg/controllers/placement/controller.go +++ b/pkg/controllers/placement/controller.go @@ -48,14 +48,8 @@ import ( "github.com/kubefleet-dev/kubefleet/pkg/utils/defaulter" "github.com/kubefleet-dev/kubefleet/pkg/utils/labels" "github.com/kubefleet-dev/kubefleet/pkg/utils/resource" - fleettime "github.com/kubefleet-dev/kubefleet/pkg/utils/time" ) -// The max size of an object in k8s is 1.5MB because of ETCD limit https://etcd.io/docs/v3.3/dev-guide/limit/. -// We choose 800KB as the soft limit for all the selected resources within one resourceSnapshot object because of this test in k8s which checks -// if object size is greater than 1MB https://github.com/kubernetes/kubernetes/blob/db1990f48b92d603f469c1c89e2ad36da1b74846/test/integration/master/synthetic_master_test.go#L337 -var resourceSnapshotResourceSizeLimit = 800 * (1 << 10) // 800KB - // We use a safety resync period to requeue all the finished request just in case there is a bug in the system. 
// TODO: unify all the controllers with this pattern and make this configurable in place of the controller runtime resync period. const controllerResyncPeriod = 30 * time.Minute @@ -77,12 +71,8 @@ type Reconciler struct { // ResourceSelectorResolver ResourceSelectorResolver controller.ResourceSelectorResolver - // ResourceSnapshotCreationMinimumInterval is the minimum interval to create a new resourcesnapshot - // to avoid too frequent updates. - ResourceSnapshotCreationMinimumInterval time.Duration - - // ResourceChangesCollectionDuration is the duration for collecting resource changes into one snapshot. - ResourceChangesCollectionDuration time.Duration + // ResourceSnapshotResolver + ResourceSnapshotResolver controller.ResourceSnapshotResolver } func (r *Reconciler) Reconcile(ctx context.Context, key controller.QueueKey) (ctrl.Result, error) { @@ -223,7 +213,7 @@ func (r *Reconciler) handleUpdate(ctx context.Context, placementObj fleetv1beta1 return ctrl.Result{}, err } - createResourceSnapshotRes, latestResourceSnapshot, err := r.getOrCreateResourceSnapshot(ctx, placementObj, envelopeObjCount, + createResourceSnapshotRes, latestResourceSnapshot, err := r.ResourceSnapshotResolver.GetOrCreateResourceSnapshot(ctx, placementObj, envelopeObjCount, &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: selectedResources}, int(revisionLimit)) if err != nil { return ctrl.Result{}, err @@ -422,309 +412,6 @@ func (r *Reconciler) deleteRedundantSchedulingPolicySnapshots(ctx context.Contex return nil } -// deleteRedundantResourceSnapshots handles multiple snapshots in a group. 
-func (r *Reconciler) deleteRedundantResourceSnapshots(ctx context.Context, placementObj fleetv1beta1.PlacementObj, revisionHistoryLimit int) error { - sortedList, err := r.listSortedResourceSnapshots(ctx, placementObj) - if err != nil { - return err - } - - items := sortedList.GetResourceSnapshotObjs() - if len(items) < revisionHistoryLimit { - // If the number of existing snapshots is less than the limit no matter how many snapshots in a group, we don't - // need to delete any snapshots. - // Skip the checking and deleting. - return nil - } - - placementKObj := klog.KObj(placementObj) - lastGroupIndex := -1 - groupCounter := 0 - - // delete the snapshots from the end as there are could be multiple snapshots in a group in order to keep the latest - // snapshots from the end. - for i := len(items) - 1; i >= 0; i-- { - snapshotKObj := klog.KObj(items[i]) - ii, err := labels.ExtractResourceIndexFromResourceSnapshot(items[i]) - if err != nil { - klog.ErrorS(err, "Failed to parse the resource index label", "placement", placementKObj, "resourceSnapshot", snapshotKObj) - return controller.NewUnexpectedBehaviorError(err) - } - if ii != lastGroupIndex { - groupCounter++ - lastGroupIndex = ii - } - if groupCounter < revisionHistoryLimit { // need to reserve one slot for the new snapshot - // When the number of group is less than the revision limit, skipping deleting the snapshot. - continue - } - if err := r.Client.Delete(ctx, items[i]); err != nil && !apierrors.IsNotFound(err) { - klog.ErrorS(err, "Failed to delete resourceSnapshot", "placement", placementKObj, "resourceSnapshot", snapshotKObj) - return controller.NewAPIServerError(false, err) - } - } - if groupCounter-revisionHistoryLimit > 0 { - // We always delete before creating a new snapshot, the snapshot group size should never exceed the limit - // as there is no finalizer added and the object should be deleted immediately. 
- klog.Warning("The number of resourceSnapshot groups exceeds the revisionHistoryLimit and it should never happen", "placement", placementKObj, "numberOfSnapshotGroups", groupCounter, "revisionHistoryLimit", revisionHistoryLimit) - } - return nil -} - -// getOrCreateResourceSnapshot gets or creates a resource snapshot for the given placement. -// It returns the latest resource snapshot if it exists and is up to date, otherwise it creates a new one. -// It also returns the ctrl.Result to indicate whether the request should be requeued or not. -// Note: when the ctrl.Result.Requeue is true, it still returns the current latest resourceSnapshot so that -// placement can update the rollout status. -func (r *Reconciler) getOrCreateResourceSnapshot(ctx context.Context, placement fleetv1beta1.PlacementObj, envelopeObjCount int, resourceSnapshotSpec *fleetv1beta1.ResourceSnapshotSpec, revisionHistoryLimit int) (ctrl.Result, fleetv1beta1.ResourceSnapshotObj, error) { - placementKObj := klog.KObj(placement) - resourceHash, err := resource.HashOf(resourceSnapshotSpec) - if err != nil { - klog.ErrorS(err, "Failed to generate resource hash", "placement", placementKObj) - return ctrl.Result{}, nil, controller.NewUnexpectedBehaviorError(err) - } - - // latestResourceSnapshotIndex should be -1 when there is no snapshot. 
- latestResourceSnapshot, latestResourceSnapshotIndex, err := r.lookupLatestResourceSnapshot(ctx, placement) - if err != nil { - return ctrl.Result{}, nil, err - } - - latestResourceSnapshotHash := "" - numberOfSnapshots := -1 - if latestResourceSnapshot != nil { - latestResourceSnapshotHash, err = annotations.ParseResourceGroupHashFromAnnotation(latestResourceSnapshot) - if err != nil { - klog.ErrorS(err, "Failed to get the ResourceGroupHashAnnotation", "resourceSnapshot", klog.KObj(latestResourceSnapshot)) - return ctrl.Result{}, nil, controller.NewUnexpectedBehaviorError(err) - } - numberOfSnapshots, err = annotations.ExtractNumberOfResourceSnapshotsFromResourceSnapshot(latestResourceSnapshot) - if err != nil { - klog.ErrorS(err, "Failed to get the NumberOfResourceSnapshotsAnnotation", "resourceSnapshot", klog.KObj(latestResourceSnapshot)) - return ctrl.Result{}, nil, controller.NewUnexpectedBehaviorError(err) - } - } - - shouldCreateNewMasterResourceSnapshot := true - // This index indicates the selected resource in the split selectedResourceList, if this index is zero we start - // from creating the master resourceSnapshot if it's greater than zero it means that the master resourceSnapshot - // got created but not all sub-indexed resourceSnapshots have been created yet. It covers the corner case where the - // controller crashes in the middle. - resourceSnapshotStartIndex := 0 - if latestResourceSnapshot != nil && latestResourceSnapshotHash == resourceHash { - if err := r.ensureLatestResourceSnapshot(ctx, latestResourceSnapshot); err != nil { - return ctrl.Result{}, nil, err - } - // check to see all that the master cluster resource snapshot and sub-indexed snapshots belonging to the same group index exists. 
- resourceSnapshotList, err := controller.ListAllResourceSnapshotWithAnIndex(ctx, r.Client, latestResourceSnapshot.GetLabels()[fleetv1beta1.ResourceIndexLabel], placement.GetName(), placement.GetNamespace()) - if err != nil { - klog.ErrorS(err, "Failed to list the latest group resourceSnapshots associated with the placement", "placement", placementKObj) - return ctrl.Result{}, nil, controller.NewAPIServerError(true, err) - } - if len(resourceSnapshotList.GetResourceSnapshotObjs()) == numberOfSnapshots { - klog.V(2).InfoS("resourceSnapshots have not changed", "placement", placementKObj, "resourceSnapshot", klog.KObj(latestResourceSnapshot)) - return ctrl.Result{}, latestResourceSnapshot, nil - } - // we should not create a new master cluster resource snapshot. - shouldCreateNewMasterResourceSnapshot = false - // set resourceSnapshotStartIndex to start from this index, so we don't try to recreate existing sub-indexed cluster resource snapshots. - resourceSnapshotStartIndex = len(resourceSnapshotList.GetResourceSnapshotObjs()) - } - - // Need to create new snapshot when 1) there is no snapshots or 2) the latest snapshot hash != current one. - // mark the last resource snapshot as inactive if it is different from what we have now or 3) when some - // sub-indexed cluster resource snapshots belonging to the same group have not been created, the master - // cluster resource snapshot should exist and be latest. - if latestResourceSnapshot != nil && latestResourceSnapshotHash != resourceHash && latestResourceSnapshot.GetLabels()[fleetv1beta1.IsLatestSnapshotLabel] == strconv.FormatBool(true) { - // When the latest resource snapshot without the isLastest label, it means it fails to create the new - // resource snapshot in the last reconcile and we don't need to check and delay the request. 
- res, error := r.shouldCreateNewResourceSnapshotNow(ctx, latestResourceSnapshot) - if error != nil { - return ctrl.Result{}, nil, error - } - if res.RequeueAfter > 0 { - // If the latest resource snapshot is not ready to be updated, we requeue the request. - return res, latestResourceSnapshot, nil - } - shouldCreateNewMasterResourceSnapshot = true - // set the latest label to false first to make sure there is only one or none active resource snapshot - labels := latestResourceSnapshot.GetLabels() - labels[fleetv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(false) - latestResourceSnapshot.SetLabels(labels) - if err := r.Client.Update(ctx, latestResourceSnapshot); err != nil { - klog.ErrorS(err, "Failed to set the isLatestSnapshot label to false", "resourceSnapshot", klog.KObj(latestResourceSnapshot)) - return ctrl.Result{}, nil, controller.NewUpdateIgnoreConflictError(err) - } - klog.V(2).InfoS("Marked the existing resourceSnapshot as inactive", "placement", placementKObj, "resourceSnapshot", klog.KObj(latestResourceSnapshot)) - } - - // only delete redundant resource snapshots and increment the latest resource snapshot index if new master resource snapshot is to be created. - if shouldCreateNewMasterResourceSnapshot { - // delete redundant snapshot revisions before creating a new master resource snapshot to guarantee that the number of snapshots won't exceed the limit. - if err := r.deleteRedundantResourceSnapshots(ctx, placement, revisionHistoryLimit); err != nil { - return ctrl.Result{}, nil, err - } - latestResourceSnapshotIndex++ - } - // split selected resources as list of lists. 
- selectedResourcesList := controller.SplitSelectedResources(resourceSnapshotSpec.SelectedResources, resourceSnapshotResourceSizeLimit) - var resourceSnapshot fleetv1beta1.ResourceSnapshotObj - for i := resourceSnapshotStartIndex; i < len(selectedResourcesList); i++ { - if i == 0 { - resourceSnapshot = BuildMasterResourceSnapshot(latestResourceSnapshotIndex, len(selectedResourcesList), envelopeObjCount, placement.GetName(), placement.GetNamespace(), resourceHash, selectedResourcesList[i]) - latestResourceSnapshot = resourceSnapshot - } else { - resourceSnapshot = BuildSubIndexResourceSnapshot(latestResourceSnapshotIndex, i-1, placement.GetName(), placement.GetNamespace(), selectedResourcesList[i]) - } - if err = r.createResourceSnapshot(ctx, placement, resourceSnapshot); err != nil { - return ctrl.Result{}, nil, err - } - } - // shouldCreateNewMasterResourceSnapshot is used here to be defensive in case of the regression. - if shouldCreateNewMasterResourceSnapshot && len(selectedResourcesList) == 0 { - resourceSnapshot = BuildMasterResourceSnapshot(latestResourceSnapshotIndex, 1, envelopeObjCount, placement.GetName(), placement.GetNamespace(), resourceHash, []fleetv1beta1.ResourceContent{}) - latestResourceSnapshot = resourceSnapshot - if err = r.createResourceSnapshot(ctx, placement, resourceSnapshot); err != nil { - return ctrl.Result{}, nil, err - } - } - return ctrl.Result{}, latestResourceSnapshot, nil -} - -// shouldCreateNewResourceSnapshotNow checks whether it is ready to create the new resource snapshot to avoid too frequent creation -// based on the configured resourceSnapshotCreationMinimumInterval and resourceChangesCollectionDuration. 
-func (r *Reconciler) shouldCreateNewResourceSnapshotNow(ctx context.Context, latestResourceSnapshot fleetv1beta1.ResourceSnapshotObj) (ctrl.Result, error) { - if r.ResourceSnapshotCreationMinimumInterval <= 0 && r.ResourceChangesCollectionDuration <= 0 { - return ctrl.Result{}, nil - } - - // We respect the ResourceChangesCollectionDuration to allow the controller to bundle all the resource changes into one snapshot. - snapshotKObj := klog.KObj(latestResourceSnapshot) - now := time.Now() - nextResourceSnapshotCandidateDetectionTime, err := annotations.ExtractNextResourceSnapshotCandidateDetectionTimeFromResourceSnapshot(latestResourceSnapshot) - if nextResourceSnapshotCandidateDetectionTime.IsZero() || err != nil { - if err != nil { - klog.ErrorS(controller.NewUnexpectedBehaviorError(err), "Failed to get the NextResourceSnapshotCandidateDetectionTimeAnnotation", "resourceSnapshot", snapshotKObj) - } - // If the annotation is not set, set next resource snapshot candidate detection time is now. 
- if latestResourceSnapshot.GetAnnotations() == nil { - latestResourceSnapshot.SetAnnotations(make(map[string]string)) - } - latestResourceSnapshot.GetAnnotations()[fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation] = now.Format(time.RFC3339) - if err := r.Client.Update(ctx, latestResourceSnapshot); err != nil { - klog.ErrorS(err, "Failed to update the NextResourceSnapshotCandidateDetectionTime annotation", "resourceSnapshot", snapshotKObj) - return ctrl.Result{}, controller.NewUpdateIgnoreConflictError(err) - } - nextResourceSnapshotCandidateDetectionTime = now - klog.V(2).InfoS("Updated the NextResourceSnapshotCandidateDetectionTime annotation", "resourceSnapshot", snapshotKObj, "nextResourceSnapshotCandidateDetectionTimeAnnotation", now.Format(time.RFC3339)) - } - nextCreationTime := fleettime.MaxTime(nextResourceSnapshotCandidateDetectionTime.Add(r.ResourceChangesCollectionDuration), latestResourceSnapshot.GetCreationTimestamp().Add(r.ResourceSnapshotCreationMinimumInterval)) - if now.Before(nextCreationTime) { - // If the next resource snapshot creation time is not reached, we requeue the request to avoid too frequent update. - klog.V(2).InfoS("Delaying the new resourceSnapshot creation", - "resourceSnapshot", snapshotKObj, "nextCreationTime", nextCreationTime, "latestResourceSnapshotCreationTime", latestResourceSnapshot.GetCreationTimestamp(), - "resourceSnapshotCreationMinimumInterval", r.ResourceSnapshotCreationMinimumInterval, "resourceChangesCollectionDuration", r.ResourceChangesCollectionDuration, - "afterDuration", nextCreationTime.Sub(now)) - return ctrl.Result{RequeueAfter: nextCreationTime.Sub(now)}, nil - } - return ctrl.Result{}, nil -} - -// TODO: move this to library package -// buildMasterResourceSnapshot builds and returns the master resource snapshot for the latest resource snapshot index and selected resources. 
-func BuildMasterResourceSnapshot(latestResourceSnapshotIndex, resourceSnapshotCount, envelopeObjCount int, placementName, placementNamespace, resourceHash string, selectedResources []fleetv1beta1.ResourceContent) fleetv1beta1.ResourceSnapshotObj { - labels := map[string]string{ - fleetv1beta1.PlacementTrackingLabel: placementName, - fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), - fleetv1beta1.ResourceIndexLabel: strconv.Itoa(latestResourceSnapshotIndex), - } - annotations := map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: strconv.Itoa(resourceSnapshotCount), - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: strconv.Itoa(envelopeObjCount), - } - spec := fleetv1beta1.ResourceSnapshotSpec{ - SelectedResources: selectedResources, - } - if placementNamespace == "" { - // Cluster-scoped placement - return &fleetv1beta1.ClusterResourceSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, placementName, latestResourceSnapshotIndex), - Labels: labels, - Annotations: annotations, - }, - Spec: spec, - } - } else { - // Namespace-scoped placement - return &fleetv1beta1.ResourceSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, placementName, latestResourceSnapshotIndex), - Namespace: placementNamespace, - Labels: labels, - Annotations: annotations, - }, - Spec: spec, - } - } -} - -// TODO: move this to library package -// BuildSubIndexResourceSnapshot builds and returns the sub index resource snapshot for both cluster-scoped and namespace-scoped placements. -// Returns a ClusterResourceSnapshot for cluster-scoped placements (empty namespace) or ResourceSnapshot for namespace-scoped placements. 
-func BuildSubIndexResourceSnapshot(latestResourceSnapshotIndex, resourceSnapshotSubIndex int, placementName, placementNamespace string, selectedResources []fleetv1beta1.ResourceContent) fleetv1beta1.ResourceSnapshotObj { - labels := map[string]string{ - fleetv1beta1.PlacementTrackingLabel: placementName, - fleetv1beta1.ResourceIndexLabel: strconv.Itoa(latestResourceSnapshotIndex), - } - annotations := map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: strconv.Itoa(resourceSnapshotSubIndex), - } - spec := fleetv1beta1.ResourceSnapshotSpec{ - SelectedResources: selectedResources, - } - if placementNamespace == "" { - // Cluster-scoped placement - return &fleetv1beta1.ClusterResourceSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, placementName, latestResourceSnapshotIndex, resourceSnapshotSubIndex), - Labels: labels, - Annotations: annotations, - }, - Spec: spec, - } - } else { - // Namespace-scoped placement - return &fleetv1beta1.ResourceSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, placementName, latestResourceSnapshotIndex, resourceSnapshotSubIndex), - Namespace: placementNamespace, - Labels: labels, - Annotations: annotations, - }, - Spec: spec, - } - } -} - -// createResourceSnapshot sets placement owner reference on the resource snapshot and creates it. -// Now supports both cluster-scoped and namespace-scoped placements using interface types. 
-func (r *Reconciler) createResourceSnapshot(ctx context.Context, placementObj fleetv1beta1.PlacementObj, resourceSnapshot fleetv1beta1.ResourceSnapshotObj) error { - resourceSnapshotKObj := klog.KObj(resourceSnapshot) - if err := controllerutil.SetControllerReference(placementObj, resourceSnapshot, r.Scheme); err != nil { - klog.ErrorS(err, "Failed to set owner reference", "resourceSnapshot", resourceSnapshotKObj) - // should never happen - return controller.NewUnexpectedBehaviorError(err) - } - if err := r.Client.Create(ctx, resourceSnapshot); err != nil { - klog.ErrorS(err, "Failed to create new resourceSnapshot", "resourceSnapshot", resourceSnapshotKObj) - return controller.NewAPIServerError(false, err) - } - klog.V(2).InfoS("Created new resourceSnapshot", "placement", klog.KObj(placementObj), "resourceSnapshot", resourceSnapshotKObj) - return nil -} - // ensureLatestPolicySnapshot ensures the latest policySnapshot has the isLatest label and the numberOfClusters are updated for interface types. func (r *Reconciler) ensureLatestPolicySnapshot(ctx context.Context, placementObj fleetv1beta1.PlacementObj, latest fleetv1beta1.PolicySnapshotObj) error { needUpdate := false @@ -780,26 +467,6 @@ func (r *Reconciler) ensureLatestPolicySnapshot(ctx context.Context, placementOb return nil } -// ensureLatestResourceSnapshot ensures the latest resourceSnapshot has the isLatest label, working with interface types. -func (r *Reconciler) ensureLatestResourceSnapshot(ctx context.Context, latest fleetv1beta1.ResourceSnapshotObj) error { - labels := latest.GetLabels() - if labels[fleetv1beta1.IsLatestSnapshotLabel] == strconv.FormatBool(true) { - return nil - } - // It could happen when the controller just sets the latest label to false for the old snapshot, and fails to - // create a new resource snapshot. - // And then the customers revert back their resource to the old one again. 
- // In this case, the "latest" snapshot without isLatest label has the same resource hash as the current one. - labels[fleetv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(true) - latest.SetLabels(labels) - if err := r.Client.Update(ctx, latest); err != nil { - klog.ErrorS(err, "Failed to update the resourceSnapshot", "resourceSnapshot", klog.KObj(latest)) - return controller.NewUpdateIgnoreConflictError(err) - } - klog.V(2).InfoS("ResourceSnapshot's IsLatestSnapshotLabel was updated to true", "resourceSnapshot", klog.KObj(latest)) - return nil -} - // lookupLatestSchedulingPolicySnapshot finds the latest snapshots and its policy index. // There will be only one active policy snapshot if exists. // It first checks whether there is an active policy snapshot. @@ -888,125 +555,6 @@ func (r *Reconciler) listSortedSchedulingPolicySnapshots(ctx context.Context, pl return snapshotList, nil } -// lookupLatestResourceSnapshot finds the latest snapshots and. -// There will be only one active resource snapshot if exists. -// It first checks whether there is an active resource snapshot. -// If not, it finds the one whose resourceIndex label is the largest. -// The resource index will always start from 0. -// lookupLatestResourceSnapshot finds the latest resource snapshots for the given placement. -// It works with both cluster-scoped (ClusterResourcePlacement) and namespace-scoped (ResourcePlacement) placements. -// There will be only one active resource snapshot if exists. -// It first checks whether there is an active resource snapshot. -// If not, it finds the one whose resourceIndex label is the largest. -// The resource index will always start from 0. -// Return error when 1) cannot list the snapshots 2) there are more than one active resource snapshots 3) snapshot has the -// invalid label value. -// 2 & 3 should never happen. 
-func (r *Reconciler) lookupLatestResourceSnapshot(ctx context.Context, placement fleetv1beta1.PlacementObj) (fleetv1beta1.ResourceSnapshotObj, int, error) { - placementKObj := klog.KObj(placement) - - // Use the existing FetchLatestMasterResourceSnapshot function to get the master snapshot - masterSnapshot, err := controller.FetchLatestMasterResourceSnapshot(ctx, r.Client, types.NamespacedName{Namespace: placement.GetNamespace(), Name: placement.GetName()}) - if err != nil { - return nil, -1, err - } - if masterSnapshot != nil { - // Extract resource index from the master snapshot - resourceIndex, err := labels.ExtractResourceIndexFromResourceSnapshot(masterSnapshot) - if err != nil { - klog.ErrorS(err, "Failed to parse the resource index label", "resourceSnapshot", klog.KObj(masterSnapshot)) - return nil, -1, controller.NewUnexpectedBehaviorError(err) - } - return masterSnapshot, resourceIndex, nil - } - // When there are no active snapshots, find the first snapshot who has the largest resource index. - // It should be rare only when placement is crashed before creating the new active snapshot. - sortedList, err := r.listSortedResourceSnapshots(ctx, placement) - if err != nil { - return nil, -1, err - } - if len(sortedList.GetResourceSnapshotObjs()) == 0 { - // The resource index of the first snapshot will start from 0. - return nil, -1, nil - } - latestSnapshot := sortedList.GetResourceSnapshotObjs()[len(sortedList.GetResourceSnapshotObjs())-1] - resourceIndex, err := labels.ExtractResourceIndexFromResourceSnapshot(latestSnapshot) - if err != nil { - klog.ErrorS(err, "Failed to parse the resource index label", "placement", placementKObj, "resourceSnapshot", klog.KObj(latestSnapshot)) - return nil, -1, controller.NewUnexpectedBehaviorError(err) - } - return latestSnapshot, resourceIndex, nil -} - -// listSortedResourceSnapshots returns the resource snapshots sorted by its index and its subindex. 
-// Now works with both cluster-scoped and namespaced resource snapshots using interface types. -// The resourceSnapshot is less than the other one when resourceIndex is less. -// When the resourceIndex is equal, then order by the subindex. -// Note: the snapshot does not have subindex is the largest of a group and there should be only one in a group. -func (r *Reconciler) listSortedResourceSnapshots(ctx context.Context, placementObj fleetv1beta1.PlacementObj) (fleetv1beta1.ResourceSnapshotObjList, error) { - placementKey := types.NamespacedName{ - Namespace: placementObj.GetNamespace(), - Name: placementObj.GetName(), - } - - snapshotList, err := controller.ListAllResourceSnapshots(ctx, r.Client, placementKey) - if err != nil { - klog.ErrorS(err, "Failed to list all resourceSnapshots", "placement", klog.KObj(placementObj)) - return nil, controller.NewAPIServerError(true, err) - } - - items := snapshotList.GetResourceSnapshotObjs() - var errs []error - sort.Slice(items, func(i, j int) bool { - iKObj := klog.KObj(items[i]) - jKObj := klog.KObj(items[j]) - ii, err := labels.ExtractResourceIndexFromResourceSnapshot(items[i]) - if err != nil { - klog.ErrorS(err, "Failed to parse the resource index label", "placement", klog.KObj(placementObj), "resourceSnapshot", iKObj) - errs = append(errs, err) - } - ji, err := labels.ExtractResourceIndexFromResourceSnapshot(items[j]) - if err != nil { - klog.ErrorS(err, "Failed to parse the resource index label", "placement", klog.KObj(placementObj), "resourceSnapshot", jKObj) - errs = append(errs, err) - } - if ii != ji { - return ii < ji - } - - iDoesExist, iSubindex, err := annotations.ExtractSubindexFromResourceSnapshot(items[i]) - if err != nil { - klog.ErrorS(err, "Failed to parse the subindex index", "placement", klog.KObj(placementObj), "resourceSnapshot", iKObj) - errs = append(errs, err) - } - jDoesExist, jSubindex, err := annotations.ExtractSubindexFromResourceSnapshot(items[j]) - if err != nil { - klog.ErrorS(err, "Failed 
to parse the subindex index", "placement", klog.KObj(placementObj), "resourceSnapshot", jKObj) - errs = append(errs, err) - } - - // Both of the snapshots do not have subindex, which should not happen. - if !iDoesExist && !jDoesExist { - klog.ErrorS(err, "There are more than one resource snapshot which do not have subindex in a group", "placement", klog.KObj(placementObj), "resourceSnapshot", iKObj, "resourceSnapshot", jKObj) - errs = append(errs, err) - } - - if !iDoesExist { // check if it's the first snapshot - return false - } - if !jDoesExist { // check if it's the first snapshot - return true - } - return iSubindex < jSubindex - }) - - if len(errs) > 0 { - return nil, controller.NewUnexpectedBehaviorError(utilerrors.NewAggregate(errs)) - } - - return snapshotList, nil -} - // TODO: further streamline the logic of setPlacementStatus // setPlacementStatus returns if there is a cluster scheduled by the scheduler. // it returns true if the cluster schedule succeeded, false otherwise. diff --git a/pkg/controllers/placement/controller_test.go b/pkg/controllers/placement/controller_test.go index 3aa9e0c02..50b3c0aae 100644 --- a/pkg/controllers/placement/controller_test.go +++ b/pkg/controllers/placement/controller_test.go @@ -23,7 +23,6 @@ import ( "fmt" "strconv" "testing" - "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -65,7 +64,6 @@ var ( } singleRevisionLimit = int32(1) multipleRevisionLimit = int32(2) - invalidRevisionLimit = int32(0) ) func serviceScheme(t *testing.T) *runtime.Scheme { @@ -1057,2065 +1055,6 @@ func TestGetOrCreateClusterSchedulingPolicySnapshot_failure(t *testing.T) { } } -func TestGetOrCreateClusterResourceSnapshot(t *testing.T) { - // test service is 383 bytes in size. - serviceResourceContent := *resource.ServiceResourceContentForTest(t) - // test deployment 390 bytes in size. - deploymentResourceContent := *resource.DeploymentResourceContentForTest(t) - // test secret is 152 bytes in size. 
- secretResourceContent := *resource.SecretResourceContentForTest(t) - - jsonBytes, err := json.Marshal(&fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}) - if err != nil { - t.Fatalf("failed to create the resourceSnapshotSpecWithSingleResourceHash hash: %v", err) - } - resourceSnapshotSpecWithEmptyResourceHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) - jsonBytes, err = json.Marshal(&fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}) - if err != nil { - t.Fatalf("failed to create the resourceSnapshotSpecWithSingleResource hash: %v", err) - } - resourceSnapshotSpecWithServiceResourceHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) - jsonBytes, err = json.Marshal(&fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent}}) - if err != nil { - t.Fatalf("failed to create the resourceSnapshotSpecWithMultipleResources hash: %v", err) - } - resourceSnapshotSpecWithTwoResourcesHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) - jsonBytes, err = json.Marshal(&fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent, deploymentResourceContent}}) - if err != nil { - t.Fatalf("failed to create the resourceSnapshotSpecWithMultipleResources hash: %v", err) - } - resourceSnapshotSpecWithMultipleResourcesHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) - now := metav1.Now() - nowToString := now.Time.Format(time.RFC3339) - tests := []struct { - name string - envelopeObjCount int - selectedResourcesSizeLimit int - resourceSnapshotSpec *fleetv1beta1.ResourceSnapshotSpec - revisionHistoryLimit *int32 - resourceSnapshots []fleetv1beta1.ClusterResourceSnapshot - wantResourceSnapshots []fleetv1beta1.ClusterResourceSnapshot - wantLatestSnapshotIndex int // index of the wantPolicySnapshots array - wantRequeue bool - }{ - { - name: "new 
resourceSnapshot and no existing snapshots owned by my-crp", - resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - revisionHistoryLimit: &invalidRevisionLimit, - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "another-crp-1", - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: "another-crp", - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: "abc", - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", - }, - }, - }, - }, - wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "another-crp-1", - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: "another-crp", - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: "abc", - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", - }, - }, - }, - // new resource snapshot owned by the my-crp - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - }, - Spec: 
fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - }, - wantLatestSnapshotIndex: 1, - }, - { - name: "resource has no change", - resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - revisionHistoryLimit: &singleRevisionLimit, - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - }, - wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", - }, - }, - 
Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - }, - wantLatestSnapshotIndex: 0, - }, - { - name: "resource has changed and there is no active snapshot with single revisionLimit", - envelopeObjCount: 2, - // It happens when last reconcile loop fails after setting the latest label to false and - // before creating a new resource snapshot. - resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - revisionHistoryLimit: &singleRevisionLimit, - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: 
fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", - }, - CreationTimestamp: now, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - }, - wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - // new resource snapshot - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 2), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "2", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - 
BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithEmptyResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "2", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - }, - }, - wantLatestSnapshotIndex: 0, - }, - { - name: "resource has changed too fast and there is an active snapshot with multiple revisionLimit", - envelopeObjCount: 3, - resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - revisionHistoryLimit: &multipleRevisionLimit, - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - fleetv1beta1.IsLatestSnapshotLabel: "true", - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", - }, - CreationTimestamp: now, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: 
[]metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - }, - }, - wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - fleetv1beta1.IsLatestSnapshotLabel: "true", - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", - fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: nowToString, - }, - CreationTimestamp: now, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: 
[]fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - }, - }, - wantRequeue: true, - wantLatestSnapshotIndex: 0, - }, - { - name: "resource has changed and there is an active snapshot with multiple revisionLimit", - envelopeObjCount: 3, - resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - revisionHistoryLimit: &multipleRevisionLimit, - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - 
fleetv1beta1.PlacementTrackingLabel: testCRPName, - fleetv1beta1.IsLatestSnapshotLabel: "true", - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", - fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: now.Add(-5 * time.Minute).Format(time.RFC3339), - }, - CreationTimestamp: metav1.NewTime(now.Time.Add(-1 * time.Hour)), - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - 
fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - }, - }, - wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - fleetv1beta1.IsLatestSnapshotLabel: "false", - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - 
APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", - fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: now.Add(-5 * time.Minute).Format(time.RFC3339), - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - // new resource snapshot - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithEmptyResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "3", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, - }, - }, - wantLatestSnapshotIndex: 3, - }, - { - name: "resource has been changed and reverted back and there is no active snapshot", - resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: 
testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - CreationTimestamp: now, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - }, - wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { 
- ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - }, - wantLatestSnapshotIndex: 1, - }, - { - name: "selected resource cross clusterResourceSnapshot size limit, no existing clusterResourceSnapshots", - selectedResourcesSizeLimit: 600, - resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent, deploymentResourceContent}}, - wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithMultipleResourcesHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", - 
fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{deploymentResourceContent}}, - }, - }, - wantLatestSnapshotIndex: 0, - }, - { - name: "selected resource cross clusterResourceSnapshot size limit, master clusterResourceSnapshot created but not all sub-indexed clusterResourceSnapshots have been created", - selectedResourcesSizeLimit: 100, - resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent, deploymentResourceContent}}, - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: 
resourceSnapshotSpecWithMultipleResourcesHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - CreationTimestamp: now, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - }, - wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithMultipleResourcesHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: 
fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{deploymentResourceContent}}, - }, - }, - wantLatestSnapshotIndex: 0, - }, - { - name: "selected resources cross clusterResourceSnapshot limit, revision limit is 1, delete existing clusterResourceSnapshots & create new clusterResourceSnapshots", - selectedResourcesSizeLimit: 100, - resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent}}, - revisionHistoryLimit: &singleRevisionLimit, - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithMultipleResourcesHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: now.Add(-5 * 
time.Minute).Format(time.RFC3339), - }, - CreationTimestamp: metav1.NewTime(now.Time.Add(-1 * time.Hour)), - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{deploymentResourceContent}}, - }, - }, - wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: 
[]metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithTwoResourcesHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 1, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, - }, - }, - wantLatestSnapshotIndex: 0, - }, - { - name: "resource has changed too fast, selected resources cross clusterResourceSnapshot limit, revision limit is 1", - selectedResourcesSizeLimit: 100, - resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent}}, - revisionHistoryLimit: &singleRevisionLimit, - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - 
OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithMultipleResourcesHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - CreationTimestamp: now, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{deploymentResourceContent}}, - }, - }, - 
wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithMultipleResourcesHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: nowToString, - }, - CreationTimestamp: now, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: 
[]metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{deploymentResourceContent}}, - }, - }, - wantRequeue: true, - wantLatestSnapshotIndex: 0, - }, - { - name: "selected resources cross clusterResourceSnapshot limit, revision limit is 1, delete existing clusterResourceSnapshot with missing sub-indexed snapshots & create new clusterResourceSnapshots", - selectedResourcesSizeLimit: 100, - resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - revisionHistoryLimit: &singleRevisionLimit, - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithMultipleResourcesHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: now.Add(-5 * time.Minute).Format(time.RFC3339), - }, - CreationTimestamp: metav1.NewTime(now.Time.Add(-1 * time.Hour)), - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: 
[]fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, - }, - }, - wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - }, - wantLatestSnapshotIndex: 0, - }, - { - name: "selected resources cross clusterResourceSnapshot limit, revision limit is 2, don't delete existing clusterResourceSnapshots & create new clusterResourceSnapshots", - selectedResourcesSizeLimit: 100, - resourceSnapshotSpec: 
&fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent}}, - revisionHistoryLimit: &multipleRevisionLimit, - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: now.Add(-5 * time.Minute).Format(time.RFC3339), - }, - CreationTimestamp: metav1.NewTime(now.Time.Add(-1 * time.Hour)), - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - }, - wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.IsLatestSnapshotLabel: "false", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - 
fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: now.Add(-5 * time.Minute).Format(time.RFC3339), - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 2), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "2", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithTwoResourcesHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 2, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "2", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, - }, - }, - wantLatestSnapshotIndex: 1, - }, - { - name: "selected resource cross 
clusterResourceSnapshot size limit, all clusterResourceSnapshots remain the same since no change", - selectedResourcesSizeLimit: 100, - resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent}}, - revisionHistoryLimit: &singleRevisionLimit, - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithTwoResourcesHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - CreationTimestamp: now, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 1, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, - }, - }, - 
wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithTwoResourcesHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 1, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, - }, - }, - wantLatestSnapshotIndex: 0, - }, - { - name: "selected resource cross clusterResourceSnapshot size limit, all clusterResourceSnapshots remain the same, but IsLatestSnapshotLabel is set to false", - selectedResourcesSizeLimit: 100, - resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, 
secretResourceContent}}, - revisionHistoryLimit: &multipleRevisionLimit, - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "false", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - CreationTimestamp: now, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.IsLatestSnapshotLabel: "false", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithTwoResourcesHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, 
testCRPName, 1, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, - }, - }, - wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "false", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - 
}, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithTwoResourcesHash, - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", - fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 1, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", - }, - }, - Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, - }, - }, - wantLatestSnapshotIndex: 1, - }, - } - originalResourceSnapshotResourceSizeLimit := resourceSnapshotResourceSizeLimit - defer func() { - resourceSnapshotResourceSizeLimit = originalResourceSnapshotResourceSizeLimit - }() - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() - crp := clusterResourcePlacementForTest() - crp.Spec.RevisionHistoryLimit = tc.revisionHistoryLimit - objects := []client.Object{crp} - for i := range tc.resourceSnapshots { - objects = append(objects, &tc.resourceSnapshots[i]) - } - scheme := serviceScheme(t) - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(objects...). 
- Build() - r := Reconciler{ - Client: fakeClient, - Scheme: scheme, - Recorder: record.NewFakeRecorder(10), - ResourceSnapshotCreationMinimumInterval: 1 * time.Minute, - } - limit := int32(defaulter.DefaultRevisionHistoryLimitValue) - if tc.revisionHistoryLimit != nil { - limit = *tc.revisionHistoryLimit - } - resourceSnapshotResourceSizeLimit = tc.selectedResourcesSizeLimit - res, got, err := r.getOrCreateResourceSnapshot(ctx, crp, tc.envelopeObjCount, tc.resourceSnapshotSpec, int(limit)) - if err != nil { - t.Fatalf("failed to handle getOrCreateResourceSnapshot: %v", err) - } - if (res.RequeueAfter > 0) != tc.wantRequeue { - t.Fatalf("getOrCreateResourceSnapshot() got Requeue %v, want %v", (res.RequeueAfter > 0), tc.wantRequeue) - } - - options := []cmp.Option{ - cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion", "CreationTimestamp"), - // Fake API server will add a newline for the runtime.RawExtension type. - // ignoring the resourceContent field for now - cmpopts.IgnoreFields(runtime.RawExtension{}, "Raw"), - } - if tc.wantRequeue { - if res.RequeueAfter <= 0 { - t.Fatalf("getOrCreateResourceSnapshot() got RequeueAfter %v, want greater than zero value", res.RequeueAfter) - } - } - annotationOption := cmp.Transformer("NormalizeAnnotations", func(m map[string]string) map[string]string { - normalized := map[string]string{} - for k, v := range m { - if k == fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation { - // Normalize the resource group hash annotation to a fixed value for comparison. 
- if _, err := time.Parse(time.RFC3339, v); err != nil { - normalized[k] = "" - } - normalized[k] = nowToString - } else { - normalized[k] = v - } - } - return normalized - }) - options = append(options, sortClusterResourceSnapshotOption, annotationOption) - gotSnapshot, ok := got.(*fleetv1beta1.ClusterResourceSnapshot) - if !ok { - t.Fatalf("expected *fleetv1beta1.ClusterResourceSnapshot, got %T", got) - } - if diff := cmp.Diff(tc.wantResourceSnapshots[tc.wantLatestSnapshotIndex], *gotSnapshot, options...); diff != "" { - t.Errorf("getOrCreateResourceSnapshot() mismatch (-want, +got):\n%s", diff) - } - clusterResourceSnapshotList := &fleetv1beta1.ClusterResourceSnapshotList{} - if err := fakeClient.List(ctx, clusterResourceSnapshotList); err != nil { - t.Fatalf("clusterResourceSnapshot List() got error %v, want no error", err) - } - if diff := cmp.Diff(tc.wantResourceSnapshots, clusterResourceSnapshotList.Items, options...); diff != "" { - t.Errorf("clusterResourceSnapshot List() mismatch (-want, +got):\n%s", diff) - } - }) - } -} - -func TestGetOrCreateClusterResourceSnapshot_failure(t *testing.T) { - selectedResources := []fleetv1beta1.ResourceContent{ - *resource.ServiceResourceContentForTest(t), - } - resourceSnapshotSpecA := &fleetv1beta1.ResourceSnapshotSpec{ - SelectedResources: selectedResources, - } - tests := []struct { - name string - resourceSnapshots []fleetv1beta1.ClusterResourceSnapshot - }{ - { - // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. 
- name: "existing active resource snapshot does not have resourceIndex label", - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: "abc", - }, - }, - }, - }, - }, - { - // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. - name: "existing active resource snapshot does not have hash annotation", - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - fleetv1beta1.ResourceIndexLabel: "0", - }, - }, - }, - }, - }, - { - // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. - name: "no active resource snapshot exists and resourceSnapshot with invalid resourceIndex label", - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.PlacementTrackingLabel: testCRPName, - fleetv1beta1.ResourceIndexLabel: "abc", - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: "abc", - }, - }, - }, - }, - }, - { - // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. 
- name: "no active resource snapshot exists and multiple resourceSnapshots with invalid resourceIndex label", - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.PlacementTrackingLabel: testCRPName, - fleetv1beta1.ResourceIndexLabel: "abc", - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: "abc", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.PlacementTrackingLabel: testCRPName, - fleetv1beta1.ResourceIndexLabel: "abc", - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: "abc", - }, - }, - }, - }, - }, - { - // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. - name: "no active resource snapshot exists and multiple resourceSnapshots with invalid subindex annotation", - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.PlacementTrackingLabel: testCRPName, - fleetv1beta1.ResourceIndexLabel: "0", - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: "0", - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: 
"ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "abc", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.PlacementTrackingLabel: testCRPName, - fleetv1beta1.ResourceIndexLabel: "1", - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: "abc", - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 1, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "abc", - }, - }, - }, - }, - }, - { - // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. 
- name: "no active resource snapshot exists and multiple resourceSnapshots with invalid subindex (<0) annotation", - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.PlacementTrackingLabel: testCRPName, - fleetv1beta1.ResourceIndexLabel: "0", - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: "0", - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "-1", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.PlacementTrackingLabel: testCRPName, - fleetv1beta1.ResourceIndexLabel: "1", - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: "abc", - fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 1, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - 
Annotations: map[string]string{ - fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "-1", - }, - }, - }, - }, - }, - { - // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. - name: "multiple active resource snapshot exist", - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "0", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: "hashA", - }, - }, - Spec: *resourceSnapshotSpecA, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "1", - fleetv1beta1.IsLatestSnapshotLabel: "true", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: "hashA", - }, - }, - Spec: *resourceSnapshotSpecA, - }, - }, - }, - { - // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterPolicySnapshot. 
- name: "no active resource snapshot exists and resourceSnapshot with invalid resourceIndex label (negative value)", - resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), - Labels: map[string]string{ - fleetv1beta1.ResourceIndexLabel: "-12", - fleetv1beta1.PlacementTrackingLabel: testCRPName, - }, - OwnerReferences: []metav1.OwnerReference{ - { - Name: testCRPName, - BlockOwnerDeletion: ptr.To(true), - Controller: ptr.To(true), - APIVersion: fleetAPIVersion, - Kind: "ClusterResourcePlacement", - }, - }, - Annotations: map[string]string{ - fleetv1beta1.ResourceGroupHashAnnotation: "hashA", - }, - }, - Spec: *resourceSnapshotSpecA, - }, - }, - }, - } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() - crp := clusterResourcePlacementForTest() - objects := []client.Object{crp} - for i := range tc.resourceSnapshots { - objects = append(objects, &tc.resourceSnapshots[i]) - } - scheme := serviceScheme(t) - fakeClient := fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(objects...). 
- Build() - r := Reconciler{ - Client: fakeClient, - Scheme: scheme, - } - res, _, err := r.getOrCreateResourceSnapshot(ctx, crp, 0, resourceSnapshotSpecA, 1) - if err == nil { // if error is nil - t.Fatal("getOrCreateClusterResourceSnapshot() = nil, want err") - } - if res.RequeueAfter > 0 { - t.Fatal("getOrCreateClusterResourceSnapshot() requeue = true, want false") - } - if !errors.Is(err, controller.ErrUnexpectedBehavior) { - t.Errorf("getOrCreateClusterResourceSnapshot() got %v, want %v type", err, controller.ErrUnexpectedBehavior) - } - }) - } -} - func TestHandleDelete(t *testing.T) { tests := []struct { name string @@ -4451,130 +2390,3 @@ func TestDetermineRolloutStateForPlacementWithExternalRolloutStrategy(t *testing }) } } - -func TestShouldCreateNewResourceSnapshotNow(t *testing.T) { - now := time.Now() - - cases := []struct { - name string - creationInterval time.Duration - collectionDuration time.Duration - creationTime time.Time - annotationValue string - wantAnnoation bool - wantRequeue ctrl.Result - }{ - { - name: "ResourceSnapshotCreationMinimumInterval and ResourceChangesCollectionDuration are 0", - creationInterval: 0, - collectionDuration: 0, - wantRequeue: ctrl.Result{Requeue: false}, - }, - { - name: "ResourceSnapshotCreationMinimumInterval is 0", - creationInterval: 0, - collectionDuration: 30 * time.Second, - annotationValue: now.Add(-10 * time.Second).Format(time.RFC3339), - wantAnnoation: true, - wantRequeue: ctrl.Result{RequeueAfter: 20 * time.Second}, - }, - { - name: "ResourceChangesCollectionDuration is 0", - creationInterval: 300 * time.Second, - collectionDuration: 0, - creationTime: now.Add(-5 * time.Second), - // no annotation → sets it and requeues - annotationValue: "", - wantAnnoation: true, - wantRequeue: ctrl.Result{RequeueAfter: 295 * time.Second}, - }, - { - name: "next detection time (now) + collection duration < latest resource snapshot creation time + creation interval", - creationInterval: 300 * time.Second, - 
collectionDuration: 30 * time.Second, - creationTime: now.Add(-5 * time.Second), - // no annotation → sets it and requeues - annotationValue: "", - wantAnnoation: true, - wantRequeue: ctrl.Result{RequeueAfter: 295 * time.Second}, - }, - { - name: "next detection time (annotation) + collection duration < latest resource snapshot creation time + creation interval", - creationInterval: 300 * time.Second, - collectionDuration: 30 * time.Second, - creationTime: now.Add(-10 * time.Second), - annotationValue: now.Add(-5 * time.Second).Format(time.RFC3339), - wantAnnoation: true, - wantRequeue: ctrl.Result{RequeueAfter: 290 * time.Second}, - }, - { - name: "last resource snapshot created long time before", - creationInterval: 60 * time.Second, - collectionDuration: 30 * time.Second, - creationTime: now.Add(-1 * time.Hour), - wantAnnoation: true, - wantRequeue: ctrl.Result{RequeueAfter: 30 * time.Second}, - }, - { - name: "next detection time (now) + collection duration >= latest resource snapshot creation time + creation interval", - creationInterval: 60 * time.Second, - collectionDuration: 60 * time.Second, - creationTime: now.Add(-40 * time.Second), - wantAnnoation: true, - wantRequeue: ctrl.Result{RequeueAfter: 60 * time.Second}, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - // initialize a snapshot with given creation time and annotation - snapshot := &fleetv1beta1.ClusterResourceSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-snapshot", - CreationTimestamp: metav1.Time{Time: tc.creationTime}, - Annotations: map[string]string{}, - }, - } - if tc.annotationValue != "" { - snapshot.Annotations[fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation] = tc.annotationValue - } - - // use fake client seeded with the snapshot - scheme := serviceScheme(t) - client := fake.NewClientBuilder(). - WithScheme(scheme). - WithRuntimeObjects(snapshot.DeepCopy()). 
- Build() - - r := &Reconciler{ - Client: client, - ResourceSnapshotCreationMinimumInterval: tc.creationInterval, - ResourceChangesCollectionDuration: tc.collectionDuration, - } - - ctx := context.Background() - if err := client.Get(ctx, types.NamespacedName{Name: snapshot.Name}, snapshot); err != nil { - t.Fatalf("Failed to get snapshot: %v", err) - } - got, err := r.shouldCreateNewResourceSnapshotNow(ctx, snapshot) - if err != nil { - t.Fatalf("shouldCreateNewResourceSnapshotNow() failed: %v", err) - } - cmpOptions := []cmp.Option{cmp.Comparer(func(d1, d2 time.Duration) bool { - if d1 == 0 { - return d2 == 0 // both are zero - } - return time.Duration.Abs(d1-d2) < 3*time.Second // allow 1 second difference - })} - if !cmp.Equal(got, tc.wantRequeue, cmpOptions...) { - t.Errorf("shouldCreateNewResourceSnapshotNow() = %v, want %v", got, tc.wantRequeue) - } - if err := client.Get(ctx, types.NamespacedName{Name: snapshot.Name}, snapshot); err != nil { - t.Fatalf("failed to get snapshot after shouldCreateNewResourceSnapshotNow: %v", err) - } - if gotAnnotation := len(snapshot.Annotations[fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation]) != 0; tc.wantAnnoation != gotAnnotation { - t.Errorf("shouldCreateNewResourceSnapshotNow() = annotation %v, want %v", snapshot.Annotations[fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation], tc.wantAnnoation) - } - }) - } -} diff --git a/pkg/controllers/placement/suite_test.go b/pkg/controllers/placement/suite_test.go index 641cbc370..d238bb0c7 100644 --- a/pkg/controllers/placement/suite_test.go +++ b/pkg/controllers/placement/suite_test.go @@ -119,12 +119,14 @@ var _ = BeforeSuite(func() { "default": true, }, } + resourceSnapshotResolver := controller.NewResourceSnapshotResolver(mgr.GetClient(), mgr.GetScheme()) reconciler := &Reconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), UncachedReader: mgr.GetAPIReader(), Recorder: mgr.GetEventRecorderFor(controllerName), ResourceSelectorResolver: 
resourceSelectorResolver, + ResourceSnapshotResolver: *resourceSnapshotResolver, } opts := options.RateLimitOptions{ RateLimiterBaseDelay: 5 * time.Millisecond, diff --git a/pkg/utils/controller/resource_snapshot_resolver.go b/pkg/utils/controller/resource_snapshot_resolver.go index 760eb7f43..b0d55d726 100644 --- a/pkg/utils/controller/resource_snapshot_resolver.go +++ b/pkg/utils/controller/resource_snapshot_resolver.go @@ -19,18 +19,432 @@ package controller import ( "context" "fmt" + "sort" "strconv" + "time" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" "github.com/kubefleet-dev/kubefleet/pkg/scheduler/queue" + "github.com/kubefleet-dev/kubefleet/pkg/utils/annotations" "github.com/kubefleet-dev/kubefleet/pkg/utils/labels" + "github.com/kubefleet-dev/kubefleet/pkg/utils/resource" + fleettime "github.com/kubefleet-dev/kubefleet/pkg/utils/time" ) +// The max size of an object in k8s is 1.5MB because of ETCD limit https://etcd.io/docs/v3.3/dev-guide/limit/. +// We choose 800KB as the soft limit for all the selected resources within one resourceSnapshot object because of this test in k8s which checks +// if object size is greater than 1MB https://github.com/kubernetes/kubernetes/blob/db1990f48b92d603f469c1c89e2ad36da1b74846/test/integration/master/synthetic_master_test.go#L337 +var resourceSnapshotResourceSizeLimit = 800 * (1 << 10) // 800KB + +type ResourceSnapshotResolver struct { + Client client.Client + Scheme *runtime.Scheme + + // Config provides configuration functions for snapshot behavior. + // If nil, default behavior (no timing restrictions) is used. 
+	Config *ResourceSnapshotConfig
+}
+
+// NewResourceSnapshotResolver creates a new ResourceSnapshotResolver with the given client and scheme,
+// which are shared by all snapshot operations. Config is left nil (no timing restrictions).
+func NewResourceSnapshotResolver(client client.Client, scheme *runtime.Scheme) *ResourceSnapshotResolver {
+	return &ResourceSnapshotResolver{
+		Client: client,
+		Scheme: scheme,
+	}
+}
+
+// ResourceSnapshotConfig defines timing parameters for resource snapshot management.
+type ResourceSnapshotConfig struct {
+	// ResourceSnapshotCreationMinimumInterval is the minimum interval to create a new resourcesnapshot
+	// to avoid too frequent updates.
+	ResourceSnapshotCreationMinimumInterval time.Duration
+
+	// ResourceChangesCollectionDuration is the duration for collecting resource changes into one snapshot.
+	ResourceChangesCollectionDuration time.Duration
+}
+
+// NewResourceSnapshotConfig creates a ResourceSnapshotConfig from the given fixed timing values.
+func NewResourceSnapshotConfig(creationInterval, collectionDuration time.Duration) *ResourceSnapshotConfig {
+	return &ResourceSnapshotConfig{
+		ResourceSnapshotCreationMinimumInterval: creationInterval,
+		ResourceChangesCollectionDuration: collectionDuration,
+	}
+}
+
+// GetOrCreateResourceSnapshot gets or creates a resource snapshot for the given placement.
+// It returns the latest resource snapshot if it exists and is up to date, otherwise it creates a new one.
+// It also returns the ctrl.Result to indicate whether the request should be requeued or not.
+// Note: when the ctrl.Result.Requeue is true, it still returns the current latest resourceSnapshot so that
+// placement can update the rollout status.
+func (r *ResourceSnapshotResolver) GetOrCreateResourceSnapshot(ctx context.Context, placement fleetv1beta1.PlacementObj, envelopeObjCount int, resourceSnapshotSpec *fleetv1beta1.ResourceSnapshotSpec, revisionHistoryLimit int) (ctrl.Result, fleetv1beta1.ResourceSnapshotObj, error) { + placementKObj := klog.KObj(placement) + resourceHash, err := resource.HashOf(resourceSnapshotSpec) + if err != nil { + klog.ErrorS(err, "Failed to generate resource hash", "placement", placementKObj) + return ctrl.Result{}, nil, NewUnexpectedBehaviorError(err) + } + + // latestResourceSnapshotIndex should be -1 when there is no snapshot. + latestResourceSnapshot, latestResourceSnapshotIndex, err := r.lookupLatestResourceSnapshot(ctx, placement) + if err != nil { + return ctrl.Result{}, nil, err + } + + latestResourceSnapshotHash := "" + numberOfSnapshots := -1 + if latestResourceSnapshot != nil { + latestResourceSnapshotHash, err = annotations.ParseResourceGroupHashFromAnnotation(latestResourceSnapshot) + if err != nil { + klog.ErrorS(err, "Failed to get the ResourceGroupHashAnnotation", "resourceSnapshot", klog.KObj(latestResourceSnapshot)) + return ctrl.Result{}, nil, NewUnexpectedBehaviorError(err) + } + numberOfSnapshots, err = annotations.ExtractNumberOfResourceSnapshotsFromResourceSnapshot(latestResourceSnapshot) + if err != nil { + klog.ErrorS(err, "Failed to get the NumberOfResourceSnapshotsAnnotation", "resourceSnapshot", klog.KObj(latestResourceSnapshot)) + return ctrl.Result{}, nil, NewUnexpectedBehaviorError(err) + } + } + + shouldCreateNewMasterResourceSnapshot := true + // This index indicates the selected resource in the split selectedResourceList, if this index is zero we start + // from creating the master resourceSnapshot if it's greater than zero it means that the master resourceSnapshot + // got created but not all sub-indexed resourceSnapshots have been created yet. It covers the corner case where the + // controller crashes in the middle. 
+ resourceSnapshotStartIndex := 0 + if latestResourceSnapshot != nil && latestResourceSnapshotHash == resourceHash { + if err := r.ensureLatestResourceSnapshot(ctx, latestResourceSnapshot); err != nil { + return ctrl.Result{}, nil, err + } + // check to see all that the master cluster resource snapshot and sub-indexed snapshots belonging to the same group index exists. + resourceSnapshotList, err := ListAllResourceSnapshotWithAnIndex(ctx, r.Client, latestResourceSnapshot.GetLabels()[fleetv1beta1.ResourceIndexLabel], placement.GetName(), placement.GetNamespace()) + if err != nil { + klog.ErrorS(err, "Failed to list the latest group resourceSnapshots associated with the placement", "placement", placementKObj) + return ctrl.Result{}, nil, NewAPIServerError(true, err) + } + if len(resourceSnapshotList.GetResourceSnapshotObjs()) == numberOfSnapshots { + klog.V(2).InfoS("resourceSnapshots have not changed", "placement", placementKObj, "resourceSnapshot", klog.KObj(latestResourceSnapshot)) + return ctrl.Result{}, latestResourceSnapshot, nil + } + // we should not create a new master cluster resource snapshot. + shouldCreateNewMasterResourceSnapshot = false + // set resourceSnapshotStartIndex to start from this index, so we don't try to recreate existing sub-indexed cluster resource snapshots. + resourceSnapshotStartIndex = len(resourceSnapshotList.GetResourceSnapshotObjs()) + } + + // Need to create new snapshot when 1) there is no snapshots or 2) the latest snapshot hash != current one. + // mark the last resource snapshot as inactive if it is different from what we have now or 3) when some + // sub-indexed cluster resource snapshots belonging to the same group have not been created, the master + // cluster resource snapshot should exist and be latest. 
+	if latestResourceSnapshot != nil && latestResourceSnapshotHash != resourceHash && latestResourceSnapshot.GetLabels()[fleetv1beta1.IsLatestSnapshotLabel] == strconv.FormatBool(true) {
+		// When the latest resource snapshot is missing the isLatest label, it means the controller failed to create the new
+		// resource snapshot in the last reconcile, so we don't need to check and delay the request.
+		res, error := r.shouldCreateNewResourceSnapshotNow(ctx, latestResourceSnapshot)
+		if error != nil {
+			return ctrl.Result{}, nil, error
+		}
+		if res.RequeueAfter > 0 {
+			// If the latest resource snapshot is not ready to be updated, we requeue the request.
+			return res, latestResourceSnapshot, nil
+		}
+		shouldCreateNewMasterResourceSnapshot = true
+		// set the latest label to false first to make sure there is only one or none active resource snapshot
+		labels := latestResourceSnapshot.GetLabels()
+		labels[fleetv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(false)
+		latestResourceSnapshot.SetLabels(labels)
+		if err := r.Client.Update(ctx, latestResourceSnapshot); err != nil {
+			klog.ErrorS(err, "Failed to set the isLatestSnapshot label to false", "resourceSnapshot", klog.KObj(latestResourceSnapshot))
+			return ctrl.Result{}, nil, NewUpdateIgnoreConflictError(err)
+		}
+		klog.V(2).InfoS("Marked the existing resourceSnapshot as inactive", "placement", placementKObj, "resourceSnapshot", klog.KObj(latestResourceSnapshot))
+	}
+
+	// only delete redundant resource snapshots and increment the latest resource snapshot index if new master resource snapshot is to be created.
+	if shouldCreateNewMasterResourceSnapshot {
+		// delete redundant snapshot revisions before creating a new master resource snapshot to guarantee that the number of snapshots won't exceed the limit.
+		if err := r.deleteRedundantResourceSnapshots(ctx, placement, revisionHistoryLimit); err != nil {
+			return ctrl.Result{}, nil, err
+		}
+		latestResourceSnapshotIndex++
+	}
+	// split selected resources as list of lists.
+ selectedResourcesList := SplitSelectedResources(resourceSnapshotSpec.SelectedResources, resourceSnapshotResourceSizeLimit) + var resourceSnapshot fleetv1beta1.ResourceSnapshotObj + for i := resourceSnapshotStartIndex; i < len(selectedResourcesList); i++ { + if i == 0 { + resourceSnapshot = BuildMasterResourceSnapshot(latestResourceSnapshotIndex, len(selectedResourcesList), envelopeObjCount, placement.GetName(), placement.GetNamespace(), resourceHash, selectedResourcesList[i]) + latestResourceSnapshot = resourceSnapshot + } else { + resourceSnapshot = BuildSubIndexResourceSnapshot(latestResourceSnapshotIndex, i-1, placement.GetName(), placement.GetNamespace(), selectedResourcesList[i]) + } + if err = r.createResourceSnapshot(ctx, placement, resourceSnapshot); err != nil { + return ctrl.Result{}, nil, err + } + } + // shouldCreateNewMasterResourceSnapshot is used here to be defensive in case of the regression. + if shouldCreateNewMasterResourceSnapshot && len(selectedResourcesList) == 0 { + resourceSnapshot = BuildMasterResourceSnapshot(latestResourceSnapshotIndex, 1, envelopeObjCount, placement.GetName(), placement.GetNamespace(), resourceHash, []fleetv1beta1.ResourceContent{}) + latestResourceSnapshot = resourceSnapshot + if err = r.createResourceSnapshot(ctx, placement, resourceSnapshot); err != nil { + return ctrl.Result{}, nil, err + } + } + return ctrl.Result{}, latestResourceSnapshot, nil +} + +// lookupLatestResourceSnapshot finds the latest snapshots and. +// There will be only one active resource snapshot if exists. +// It first checks whether there is an active resource snapshot. +// If not, it finds the one whose resourceIndex label is the largest. +// The resource index will always start from 0. +// lookupLatestResourceSnapshot finds the latest resource snapshots for the given placement. +// It works with both cluster-scoped (ClusterResourcePlacement) and namespace-scoped (ResourcePlacement) placements. 
+// There will be only one active resource snapshot if exists. +// It first checks whether there is an active resource snapshot. +// If not, it finds the one whose resourceIndex label is the largest. +// The resource index will always start from 0. +// Return error when 1) cannot list the snapshots 2) there are more than one active resource snapshots 3) snapshot has the +// invalid label value. +// 2 & 3 should never happen. +func (r *ResourceSnapshotResolver) lookupLatestResourceSnapshot(ctx context.Context, placement fleetv1beta1.PlacementObj) (fleetv1beta1.ResourceSnapshotObj, int, error) { + placementKObj := klog.KObj(placement) + + // Use the existing FetchLatestMasterResourceSnapshot function to get the master snapshot + masterSnapshot, err := FetchLatestMasterResourceSnapshot(ctx, r.Client, types.NamespacedName{Namespace: placement.GetNamespace(), Name: placement.GetName()}) + if err != nil { + return nil, -1, err + } + if masterSnapshot != nil { + // Extract resource index from the master snapshot + resourceIndex, err := labels.ExtractResourceIndexFromResourceSnapshot(masterSnapshot) + if err != nil { + klog.ErrorS(err, "Failed to parse the resource index label", "resourceSnapshot", klog.KObj(masterSnapshot)) + return nil, -1, NewUnexpectedBehaviorError(err) + } + return masterSnapshot, resourceIndex, nil + } + // When there are no active snapshots, find the first snapshot who has the largest resource index. + // It should be rare only when placement is crashed before creating the new active snapshot. + sortedList, err := r.listSortedResourceSnapshots(ctx, placement) + if err != nil { + return nil, -1, err + } + if len(sortedList.GetResourceSnapshotObjs()) == 0 { + // The resource index of the first snapshot will start from 0. 
+ return nil, -1, nil + } + latestSnapshot := sortedList.GetResourceSnapshotObjs()[len(sortedList.GetResourceSnapshotObjs())-1] + resourceIndex, err := labels.ExtractResourceIndexFromResourceSnapshot(latestSnapshot) + if err != nil { + klog.ErrorS(err, "Failed to parse the resource index label", "placement", placementKObj, "resourceSnapshot", klog.KObj(latestSnapshot)) + return nil, -1, NewUnexpectedBehaviorError(err) + } + return latestSnapshot, resourceIndex, nil +} + +// listSortedResourceSnapshots returns the resource snapshots sorted by its index and its subindex. +// Now works with both cluster-scoped and namespaced resource snapshots using interface types. +// The resourceSnapshot is less than the other one when resourceIndex is less. +// When the resourceIndex is equal, then order by the subindex. +// Note: the snapshot does not have subindex is the largest of a group and there should be only one in a group. +func (r *ResourceSnapshotResolver) listSortedResourceSnapshots(ctx context.Context, placementObj fleetv1beta1.PlacementObj) (fleetv1beta1.ResourceSnapshotObjList, error) { + placementKey := types.NamespacedName{ + Namespace: placementObj.GetNamespace(), + Name: placementObj.GetName(), + } + + snapshotList, err := ListAllResourceSnapshots(ctx, r.Client, placementKey) + if err != nil { + klog.ErrorS(err, "Failed to list all resourceSnapshots", "placement", klog.KObj(placementObj)) + return nil, NewAPIServerError(true, err) + } + + items := snapshotList.GetResourceSnapshotObjs() + var errs []error + sort.Slice(items, func(i, j int) bool { + iKObj := klog.KObj(items[i]) + jKObj := klog.KObj(items[j]) + ii, err := labels.ExtractResourceIndexFromResourceSnapshot(items[i]) + if err != nil { + klog.ErrorS(err, "Failed to parse the resource index label", "placement", klog.KObj(placementObj), "resourceSnapshot", iKObj) + errs = append(errs, err) + } + ji, err := labels.ExtractResourceIndexFromResourceSnapshot(items[j]) + if err != nil { + klog.ErrorS(err, 
"Failed to parse the resource index label", "placement", klog.KObj(placementObj), "resourceSnapshot", jKObj) + errs = append(errs, err) + } + if ii != ji { + return ii < ji + } + + iDoesExist, iSubindex, err := annotations.ExtractSubindexFromResourceSnapshot(items[i]) + if err != nil { + klog.ErrorS(err, "Failed to parse the subindex index", "placement", klog.KObj(placementObj), "resourceSnapshot", iKObj) + errs = append(errs, err) + } + jDoesExist, jSubindex, err := annotations.ExtractSubindexFromResourceSnapshot(items[j]) + if err != nil { + klog.ErrorS(err, "Failed to parse the subindex index", "placement", klog.KObj(placementObj), "resourceSnapshot", jKObj) + errs = append(errs, err) + } + + // Both of the snapshots do not have subindex, which should not happen. + if !iDoesExist && !jDoesExist { + klog.ErrorS(err, "There are more than one resource snapshot which do not have subindex in a group", "placement", klog.KObj(placementObj), "resourceSnapshot", iKObj, "resourceSnapshot", jKObj) + errs = append(errs, err) + } + + if !iDoesExist { // check if it's the first snapshot + return false + } + if !jDoesExist { // check if it's the first snapshot + return true + } + return iSubindex < jSubindex + }) + + if len(errs) > 0 { + return nil, NewUnexpectedBehaviorError(utilerrors.NewAggregate(errs)) + } + + return snapshotList, nil +} + +// ensureLatestResourceSnapshot ensures the latest resourceSnapshot has the isLatest label, working with interface types. +func (r *ResourceSnapshotResolver) ensureLatestResourceSnapshot(ctx context.Context, latest fleetv1beta1.ResourceSnapshotObj) error { + labels := latest.GetLabels() + if labels[fleetv1beta1.IsLatestSnapshotLabel] == strconv.FormatBool(true) { + return nil + } + // It could happen when the controller just sets the latest label to false for the old snapshot, and fails to + // create a new resource snapshot. + // And then the customers revert back their resource to the old one again. 
+ // In this case, the "latest" snapshot without isLatest label has the same resource hash as the current one. + labels[fleetv1beta1.IsLatestSnapshotLabel] = strconv.FormatBool(true) + latest.SetLabels(labels) + if err := r.Client.Update(ctx, latest); err != nil { + klog.ErrorS(err, "Failed to update the resourceSnapshot", "resourceSnapshot", klog.KObj(latest)) + return NewUpdateIgnoreConflictError(err) + } + klog.V(2).InfoS("ResourceSnapshot's IsLatestSnapshotLabel was updated to true", "resourceSnapshot", klog.KObj(latest)) + return nil +} + +// shouldCreateNewResourceSnapshotNow checks whether it is ready to create the new resource snapshot to avoid too frequent creation +// based on the configured resourceSnapshotCreationMinimumInterval and resourceChangesCollectionDuration. +func (r *ResourceSnapshotResolver) shouldCreateNewResourceSnapshotNow(ctx context.Context, latestResourceSnapshot fleetv1beta1.ResourceSnapshotObj) (ctrl.Result, error) { + if r.Config != nil && r.Config.ResourceSnapshotCreationMinimumInterval <= 0 && r.Config.ResourceChangesCollectionDuration <= 0 { + return ctrl.Result{}, nil + } + + // We respect the ResourceChangesCollectionDuration to allow the controller to bundle all the resource changes into one snapshot. + snapshotKObj := klog.KObj(latestResourceSnapshot) + now := time.Now() + nextResourceSnapshotCandidateDetectionTime, err := annotations.ExtractNextResourceSnapshotCandidateDetectionTimeFromResourceSnapshot(latestResourceSnapshot) + if nextResourceSnapshotCandidateDetectionTime.IsZero() || err != nil { + if err != nil { + klog.ErrorS(NewUnexpectedBehaviorError(err), "Failed to get the NextResourceSnapshotCandidateDetectionTimeAnnotation", "resourceSnapshot", snapshotKObj) + } + // If the annotation is not set, set next resource snapshot candidate detection time is now. 
+ if latestResourceSnapshot.GetAnnotations() == nil { + latestResourceSnapshot.SetAnnotations(make(map[string]string)) + } + latestResourceSnapshot.GetAnnotations()[fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation] = now.Format(time.RFC3339) + if err := r.Client.Update(ctx, latestResourceSnapshot); err != nil { + klog.ErrorS(err, "Failed to update the NextResourceSnapshotCandidateDetectionTime annotation", "resourceSnapshot", snapshotKObj) + return ctrl.Result{}, NewUpdateIgnoreConflictError(err) + } + nextResourceSnapshotCandidateDetectionTime = now + klog.V(2).InfoS("Updated the NextResourceSnapshotCandidateDetectionTime annotation", "resourceSnapshot", snapshotKObj, "nextResourceSnapshotCandidateDetectionTimeAnnotation", now.Format(time.RFC3339)) + } + nextCreationTime := fleettime.MaxTime(nextResourceSnapshotCandidateDetectionTime.Add(r.Config.ResourceChangesCollectionDuration), latestResourceSnapshot.GetCreationTimestamp().Add(r.Config.ResourceSnapshotCreationMinimumInterval)) + if now.Before(nextCreationTime) { + // If the next resource snapshot creation time is not reached, we requeue the request to avoid too frequent update. + klog.V(2).InfoS("Delaying the new resourceSnapshot creation", + "resourceSnapshot", snapshotKObj, "nextCreationTime", nextCreationTime, "latestResourceSnapshotCreationTime", latestResourceSnapshot.GetCreationTimestamp(), + "resourceSnapshotCreationMinimumInterval", r.Config.ResourceSnapshotCreationMinimumInterval, "resourceChangesCollectionDuration", r.Config.ResourceChangesCollectionDuration, + "afterDuration", nextCreationTime.Sub(now)) + return ctrl.Result{RequeueAfter: nextCreationTime.Sub(now)}, nil + } + return ctrl.Result{}, nil +} + +// deleteRedundantResourceSnapshots handles multiple snapshots in a group. 
+func (r *ResourceSnapshotResolver) deleteRedundantResourceSnapshots(ctx context.Context, placementObj fleetv1beta1.PlacementObj, revisionHistoryLimit int) error { + sortedList, err := r.listSortedResourceSnapshots(ctx, placementObj) + if err != nil { + return err + } + + items := sortedList.GetResourceSnapshotObjs() + if len(items) < revisionHistoryLimit { + // If the number of existing snapshots is less than the limit no matter how many snapshots in a group, we don't + // need to delete any snapshots. + // Skip the checking and deleting. + return nil + } + + placementKObj := klog.KObj(placementObj) + lastGroupIndex := -1 + groupCounter := 0 + + // delete the snapshots from the end as there are could be multiple snapshots in a group in order to keep the latest + // snapshots from the end. + for i := len(items) - 1; i >= 0; i-- { + snapshotKObj := klog.KObj(items[i]) + ii, err := labels.ExtractResourceIndexFromResourceSnapshot(items[i]) + if err != nil { + klog.ErrorS(err, "Failed to parse the resource index label", "placement", placementKObj, "resourceSnapshot", snapshotKObj) + return NewUnexpectedBehaviorError(err) + } + if ii != lastGroupIndex { + groupCounter++ + lastGroupIndex = ii + } + if groupCounter < revisionHistoryLimit { // need to reserve one slot for the new snapshot + // When the number of group is less than the revision limit, skipping deleting the snapshot. + continue + } + if err := r.Client.Delete(ctx, items[i]); err != nil && !apierrors.IsNotFound(err) { + klog.ErrorS(err, "Failed to delete resourceSnapshot", "placement", placementKObj, "resourceSnapshot", snapshotKObj) + return NewAPIServerError(false, err) + } + } + if groupCounter-revisionHistoryLimit > 0 { + // We always delete before creating a new snapshot, the snapshot group size should never exceed the limit + // as there is no finalizer added and the object should be deleted immediately. 
+ klog.Warning("The number of resourceSnapshot groups exceeds the revisionHistoryLimit and it should never happen", "placement", placementKObj, "numberOfSnapshotGroups", groupCounter, "revisionHistoryLimit", revisionHistoryLimit) + } + return nil +} + +// createResourceSnapshot sets placement owner reference on the resource snapshot and creates it. +// Now supports both cluster-scoped and namespace-scoped placements using interface types. +func (r *ResourceSnapshotResolver) createResourceSnapshot(ctx context.Context, placementObj fleetv1beta1.PlacementObj, resourceSnapshot fleetv1beta1.ResourceSnapshotObj) error { + resourceSnapshotKObj := klog.KObj(resourceSnapshot) + if err := controllerutil.SetControllerReference(placementObj, resourceSnapshot, r.Scheme); err != nil { + klog.ErrorS(err, "Failed to set owner reference", "resourceSnapshot", resourceSnapshotKObj) + // should never happen + return NewUnexpectedBehaviorError(err) + } + if err := r.Client.Create(ctx, resourceSnapshot); err != nil { + klog.ErrorS(err, "Failed to create new resourceSnapshot", "resourceSnapshot", resourceSnapshotKObj) + return NewAPIServerError(false, err) + } + klog.V(2).InfoS("Created new resourceSnapshot", "placement", klog.KObj(placementObj), "resourceSnapshot", resourceSnapshotKObj) + return nil +} + // FetchAllResourceSnapshotsAlongWithMaster fetches the group of resourceSnapshot or resourceSnapshots using the latest master resourceSnapshot. 
 func FetchAllResourceSnapshotsAlongWithMaster(ctx context.Context, k8Client client.Reader, placementKey string, masterResourceSnapshot fleetv1beta1.ResourceSnapshotObj) (map[string]fleetv1beta1.ResourceSnapshotObj, error) {
 	resourceSnapshots := make(map[string]fleetv1beta1.ResourceSnapshotObj)
@@ -222,11 +636,10 @@ func DeleteResourceSnapshots(ctx context.Context, k8Client client.Client, placem
 	return nil
 }
 
-// BuildMasterResourceSnapshot builds and returns the master resource snapshot for the latest resource snapshot index and selected resources.
-// If the placement is namespace-scoped, it creates a namespace-scoped ResourceSnapshot; otherwise, it creates a cluster-scoped ClusterResourceSnapshot.
-func BuildMasterResourceSnapshot(placementObj fleetv1beta1.PlacementObj, latestResourceSnapshotIndex, resourceSnapshotCount, envelopeObjCount int, resourceHash string, selectedResources []fleetv1beta1.ResourceContent) fleetv1beta1.ResourceSnapshotObj {
+// BuildMasterResourceSnapshot builds and returns the master resource snapshot for the latest resource snapshot index and selected resources.
+func BuildMasterResourceSnapshot(latestResourceSnapshotIndex, resourceSnapshotCount, envelopeObjCount int, placementName, placementNamespace, resourceHash string, selectedResources []fleetv1beta1.ResourceContent) fleetv1beta1.ResourceSnapshotObj { labels := map[string]string{ - fleetv1beta1.PlacementTrackingLabel: placementObj.GetName(), + fleetv1beta1.PlacementTrackingLabel: placementName, fleetv1beta1.IsLatestSnapshotLabel: strconv.FormatBool(true), fleetv1beta1.ResourceIndexLabel: strconv.Itoa(latestResourceSnapshotIndex), } @@ -238,27 +651,63 @@ func BuildMasterResourceSnapshot(placementObj fleetv1beta1.PlacementObj, latestR spec := fleetv1beta1.ResourceSnapshotSpec{ SelectedResources: selectedResources, } - - // If namespace is provided, create a namespace-scoped ResourceSnapshot - if placementObj.GetNamespace() != "" { + if placementNamespace == "" { + // Cluster-scoped placement + return &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, placementName, latestResourceSnapshotIndex), + Labels: labels, + Annotations: annotations, + }, + Spec: spec, + } + } else { + // Namespace-scoped placement return &fleetv1beta1.ResourceSnapshot{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, placementObj.GetName(), latestResourceSnapshotIndex), - Namespace: placementObj.GetNamespace(), + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, placementName, latestResourceSnapshotIndex), + Namespace: placementNamespace, Labels: labels, Annotations: annotations, }, Spec: spec, } } +} - // Otherwise, create a cluster-scoped ClusterResourceSnapshot - return &fleetv1beta1.ClusterResourceSnapshot{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, placementObj.GetName(), latestResourceSnapshotIndex), - Labels: labels, - Annotations: annotations, - }, - Spec: spec, +// BuildSubIndexResourceSnapshot builds and returns the 
sub index resource snapshot for both cluster-scoped and namespace-scoped placements. +// Returns a ClusterResourceSnapshot for cluster-scoped placements (empty namespace) or ResourceSnapshot for namespace-scoped placements. +func BuildSubIndexResourceSnapshot(latestResourceSnapshotIndex, resourceSnapshotSubIndex int, placementName, placementNamespace string, selectedResources []fleetv1beta1.ResourceContent) fleetv1beta1.ResourceSnapshotObj { + labels := map[string]string{ + fleetv1beta1.PlacementTrackingLabel: placementName, + fleetv1beta1.ResourceIndexLabel: strconv.Itoa(latestResourceSnapshotIndex), + } + annotations := map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: strconv.Itoa(resourceSnapshotSubIndex), + } + spec := fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: selectedResources, + } + if placementNamespace == "" { + // Cluster-scoped placement + return &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, placementName, latestResourceSnapshotIndex, resourceSnapshotSubIndex), + Labels: labels, + Annotations: annotations, + }, + Spec: spec, + } + } else { + // Namespace-scoped placement + return &fleetv1beta1.ResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, placementName, latestResourceSnapshotIndex, resourceSnapshotSubIndex), + Namespace: placementNamespace, + Labels: labels, + Annotations: annotations, + }, + Spec: spec, + } } } diff --git a/pkg/utils/controller/resource_snapshot_resolver_test.go b/pkg/utils/controller/resource_snapshot_resolver_test.go index f55e44fc7..2fc3c71c6 100644 --- a/pkg/utils/controller/resource_snapshot_resolver_test.go +++ b/pkg/utils/controller/resource_snapshot_resolver_test.go @@ -18,6 +18,8 @@ package controller import ( "context" + "crypto/sha256" + "encoding/json" "errors" "fmt" "maps" @@ -28,13 +30,23 @@ import ( 
"github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" fleetv1beta1 "github.com/kubefleet-dev/kubefleet/apis/placement/v1beta1" + "github.com/kubefleet-dev/kubefleet/pkg/utils/defaulter" + "github.com/kubefleet-dev/kubefleet/test/utils/resource" +) + +const ( + testCRPName = "my-crp" + placementGeneration = 15 ) var resourceSnapshotCmpOptions = []cmp.Option{ @@ -46,6 +58,61 @@ var resourceSnapshotCmpOptions = []cmp.Option{ }), } +var ( + fleetAPIVersion = fleetv1beta1.GroupVersion.String() + sortClusterResourceSnapshotOption = cmpopts.SortSlices(func(r1, r2 fleetv1beta1.ClusterResourceSnapshot) bool { + return r1.Name < r2.Name + }) + + singleRevisionLimit = int32(1) + multipleRevisionLimit = int32(2) + invalidRevisionLimit = int32(0) +) + +func placementPolicyForTest() *fleetv1beta1.PlacementPolicy { + return &fleetv1beta1.PlacementPolicy{ + PlacementType: fleetv1beta1.PickNPlacementType, + NumberOfClusters: ptr.To(int32(3)), + Affinity: &fleetv1beta1.Affinity{ + ClusterAffinity: &fleetv1beta1.ClusterAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &fleetv1beta1.ClusterSelector{ + ClusterSelectorTerms: []fleetv1beta1.ClusterSelectorTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "key1": "value1", + }, + }, + }, + }, + }, + }, + }, + } +} + +func clusterResourcePlacementForTest() *fleetv1beta1.ClusterResourcePlacement { + return &fleetv1beta1.ClusterResourcePlacement{ + ObjectMeta: metav1.ObjectMeta{ + Name: testCRPName, + Generation: placementGeneration, + }, + Spec: fleetv1beta1.PlacementSpec{ + ResourceSelectors: []fleetv1beta1.ResourceSelectorTerm{ + { + Group: corev1.GroupName, + Version: "v1", + Kind: 
"Service", + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"region": "east"}, + }, + }, + }, + Policy: placementPolicyForTest(), + }, + } +} + func TestFetchAllResourceSnapshotsAlongWithMaster(t *testing.T) { tests := []struct { name string @@ -1592,10 +1659,11 @@ func TestBuildMasterResourceSnapshot(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { result := BuildMasterResourceSnapshot( - tt.placementObj, tt.latestResourceSnapshotIndex, tt.resourceSnapshotCount, tt.envelopeObjCount, + tt.placementObj.GetName(), + tt.placementObj.GetNamespace(), tt.resourceHash, tt.selectedResources, ) @@ -1812,3 +1880,2181 @@ type errorClient struct { func (e *errorClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { return fmt.Errorf("failed to list") } + +func TestGetOrCreateClusterResourceSnapshot(t *testing.T) { + // test service is 383 bytes in size. + serviceResourceContent := *resource.ServiceResourceContentForTest(t) + // test deployment 390 bytes in size. + deploymentResourceContent := *resource.DeploymentResourceContentForTest(t) + // test secret is 152 bytes in size. 
+ secretResourceContent := *resource.SecretResourceContentForTest(t) + + jsonBytes, err := json.Marshal(&fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}) + if err != nil { + t.Fatalf("failed to create the resourceSnapshotSpecWithSingleResourceHash hash: %v", err) + } + resourceSnapshotSpecWithEmptyResourceHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) + jsonBytes, err = json.Marshal(&fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}) + if err != nil { + t.Fatalf("failed to create the resourceSnapshotSpecWithSingleResource hash: %v", err) + } + resourceSnapshotSpecWithServiceResourceHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) + jsonBytes, err = json.Marshal(&fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent}}) + if err != nil { + t.Fatalf("failed to create the resourceSnapshotSpecWithMultipleResources hash: %v", err) + } + resourceSnapshotSpecWithTwoResourcesHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) + jsonBytes, err = json.Marshal(&fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent, deploymentResourceContent}}) + if err != nil { + t.Fatalf("failed to create the resourceSnapshotSpecWithMultipleResources hash: %v", err) + } + resourceSnapshotSpecWithMultipleResourcesHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) + now := metav1.Now() + nowToString := now.Time.Format(time.RFC3339) + tests := []struct { + name string + envelopeObjCount int + selectedResourcesSizeLimit int + resourceSnapshotSpec *fleetv1beta1.ResourceSnapshotSpec + revisionHistoryLimit *int32 + resourceSnapshots []fleetv1beta1.ClusterResourceSnapshot + wantResourceSnapshots []fleetv1beta1.ClusterResourceSnapshot + wantLatestSnapshotIndex int // index of the wantPolicySnapshots array + wantRequeue bool + }{ + { + name: "new 
resourceSnapshot and no existing snapshots owned by my-crp", + resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + revisionHistoryLimit: &invalidRevisionLimit, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "another-crp-1", + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: "another-crp", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "another-crp-1", + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: "another-crp", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + }, + // new resource snapshot owned by the my-crp + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + }, + Spec: 
fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + }, + wantLatestSnapshotIndex: 1, + }, + { + name: "resource has no change", + resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + revisionHistoryLimit: &singleRevisionLimit, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + }, + 
Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + }, + wantLatestSnapshotIndex: 0, + }, + { + name: "resource has changed and there is no active snapshot with single revisionLimit", + envelopeObjCount: 2, + // It happens when last reconcile loop fails after setting the latest label to false and + // before creating a new resource snapshot. + resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + revisionHistoryLimit: &singleRevisionLimit, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: 
fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + }, + CreationTimestamp: now, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + // new resource snapshot + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 2), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "2", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + 
BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithEmptyResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "2", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + }, + }, + wantLatestSnapshotIndex: 0, + }, + { + name: "resource has changed too fast and there is an active snapshot with multiple revisionLimit", + envelopeObjCount: 3, + resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + revisionHistoryLimit: &multipleRevisionLimit, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", + }, + CreationTimestamp: now, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: 
[]metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", + fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: nowToString, + }, + CreationTimestamp: now, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: 
[]fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + }, + }, + wantRequeue: true, + wantLatestSnapshotIndex: 0, + }, + { + name: "resource has changed and there is an active snapshot with multiple revisionLimit", + envelopeObjCount: 3, + resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + revisionHistoryLimit: &multipleRevisionLimit, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + 
fleetv1beta1.PlacementTrackingLabel: testCRPName, + fleetv1beta1.IsLatestSnapshotLabel: "true", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", + fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: now.Add(-5 * time.Minute).Format(time.RFC3339), + }, + CreationTimestamp: metav1.NewTime(now.Time.Add(-1 * time.Hour)), + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + 
fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + fleetv1beta1.IsLatestSnapshotLabel: "false", + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + 
APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", + fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: now.Add(-5 * time.Minute).Format(time.RFC3339), + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + // new resource snapshot + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithEmptyResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "3", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{}}, + }, + }, + wantLatestSnapshotIndex: 3, + }, + { + name: "resource has been changed and reverted back and there is no active snapshot", + resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: 
testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + CreationTimestamp: now, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { 
+ ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + }, + wantLatestSnapshotIndex: 1, + }, + { + name: "selected resource cross clusterResourceSnapshot size limit, no existing clusterResourceSnapshots", + selectedResourcesSizeLimit: 600, + resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent, deploymentResourceContent}}, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithMultipleResourcesHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + 
fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{deploymentResourceContent}}, + }, + }, + wantLatestSnapshotIndex: 0, + }, + { + name: "selected resource cross clusterResourceSnapshot size limit, master clusterResourceSnapshot created but not all sub-indexed clusterResourceSnapshots have been created", + selectedResourcesSizeLimit: 100, + resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent, deploymentResourceContent}}, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: 
resourceSnapshotSpecWithMultipleResourcesHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + CreationTimestamp: now, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithMultipleResourcesHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: 
fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{deploymentResourceContent}}, + }, + }, + wantLatestSnapshotIndex: 0, + }, + { + name: "selected resources cross clusterResourceSnapshot limit, revision limit is 1, delete existing clusterResourceSnapshots & create new clusterResourceSnapshots", + selectedResourcesSizeLimit: 100, + resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent}}, + revisionHistoryLimit: &singleRevisionLimit, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithMultipleResourcesHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: now.Add(-5 * 
time.Minute).Format(time.RFC3339), + }, + CreationTimestamp: metav1.NewTime(now.Time.Add(-1 * time.Hour)), + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{deploymentResourceContent}}, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: 
[]metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithTwoResourcesHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 1, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, + }, + }, + wantLatestSnapshotIndex: 0, + }, + { + name: "resource has changed too fast, selected resources cross clusterResourceSnapshot limit, revision limit is 1", + selectedResourcesSizeLimit: 100, + resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent}}, + revisionHistoryLimit: &singleRevisionLimit, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + 
OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithMultipleResourcesHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + CreationTimestamp: now, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{deploymentResourceContent}}, + }, + }, + 
wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithMultipleResourcesHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: nowToString, + }, + CreationTimestamp: now, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: 
[]metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "1", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{deploymentResourceContent}}, + }, + }, + wantRequeue: true, + wantLatestSnapshotIndex: 0, + }, + { + name: "selected resources cross clusterResourceSnapshot limit, revision limit is 1, delete existing clusterResourceSnapshot with missing sub-indexed snapshots & create new clusterResourceSnapshots", + selectedResourcesSizeLimit: 100, + resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + revisionHistoryLimit: &singleRevisionLimit, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithMultipleResourcesHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "3", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: now.Add(-5 * time.Minute).Format(time.RFC3339), + }, + CreationTimestamp: metav1.NewTime(now.Time.Add(-1 * time.Hour)), + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: 
[]fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + }, + wantLatestSnapshotIndex: 0, + }, + { + name: "selected resources cross clusterResourceSnapshot limit, revision limit is 2, don't delete existing clusterResourceSnapshots & create new clusterResourceSnapshots", + selectedResourcesSizeLimit: 100, + resourceSnapshotSpec: 
&fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent}}, + revisionHistoryLimit: &multipleRevisionLimit, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: now.Add(-5 * time.Minute).Format(time.RFC3339), + }, + CreationTimestamp: metav1.NewTime(now.Time.Add(-1 * time.Hour)), + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "false", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + 
fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation: now.Add(-5 * time.Minute).Format(time.RFC3339), + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 2), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "2", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithTwoResourcesHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 2, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "2", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, + }, + }, + wantLatestSnapshotIndex: 1, + }, + { + name: "selected resource cross 
clusterResourceSnapshot size limit, all clusterResourceSnapshots remain the same since no change", + selectedResourcesSizeLimit: 100, + resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, secretResourceContent}}, + revisionHistoryLimit: &singleRevisionLimit, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithTwoResourcesHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + CreationTimestamp: now, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 1, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, + }, + }, + 
wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithTwoResourcesHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 1, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, + }, + }, + wantLatestSnapshotIndex: 0, + }, + { + name: "selected resource cross clusterResourceSnapshot size limit, all clusterResourceSnapshots remain the same, but IsLatestSnapshotLabel is set to false", + selectedResourcesSizeLimit: 100, + resourceSnapshotSpec: &fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent, 
secretResourceContent}}, + revisionHistoryLimit: &multipleRevisionLimit, + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "false", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + CreationTimestamp: now, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "false", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithTwoResourcesHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, 
testCRPName, 1, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, + }, + }, + wantResourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "false", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithServiceResourceHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "1", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + 
}, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: resourceSnapshotSpecWithTwoResourcesHash, + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + fleetv1beta1.NumberOfEnvelopedObjectsAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{serviceResourceContent}}, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 1, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "0", + }, + }, + Spec: fleetv1beta1.ResourceSnapshotSpec{SelectedResources: []fleetv1beta1.ResourceContent{secretResourceContent}}, + }, + }, + wantLatestSnapshotIndex: 1, + }, + } + originalResourceSnapshotResourceSizeLimit := resourceSnapshotResourceSizeLimit + defer func() { + resourceSnapshotResourceSizeLimit = originalResourceSnapshotResourceSizeLimit + }() + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + crp := clusterResourcePlacementForTest() + crp.Spec.RevisionHistoryLimit = tc.revisionHistoryLimit + objects := []client.Object{crp} + for i := range tc.resourceSnapshots { + objects = append(objects, &tc.resourceSnapshots[i]) + } + scheme := serviceScheme(t) + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). 
+ Build()
+ resolver := NewResourceSnapshotResolver(fakeClient, scheme)
+ resolver.Config = NewResourceSnapshotConfig(1*time.Minute, 0)
+ limit := int32(defaulter.DefaultRevisionHistoryLimitValue)
+ if tc.revisionHistoryLimit != nil {
+ limit = *tc.revisionHistoryLimit
+ }
+ resourceSnapshotResourceSizeLimit = tc.selectedResourcesSizeLimit
+ res, got, err := resolver.GetOrCreateResourceSnapshot(ctx, crp, tc.envelopeObjCount, tc.resourceSnapshotSpec, int(limit))
+ if err != nil {
+ t.Fatalf("failed to handle getOrCreateResourceSnapshot: %v", err)
+ }
+ if (res.RequeueAfter > 0) != tc.wantRequeue {
+ t.Fatalf("GetOrCreateResourceSnapshot() got Requeue %v, want %v", (res.RequeueAfter > 0), tc.wantRequeue)
+ }
+
+ options := []cmp.Option{
+ cmpopts.IgnoreFields(metav1.ObjectMeta{}, "ResourceVersion", "CreationTimestamp"),
+ // Fake API server will add a newline for the runtime.RawExtension type.
+ // ignoring the resourceContent field for now
+ cmpopts.IgnoreFields(runtime.RawExtension{}, "Raw"),
+ }
+ if tc.wantRequeue {
+ if res.RequeueAfter <= 0 {
+ t.Fatalf("GetOrCreateResourceSnapshot() got RequeueAfter %v, want greater than zero value", res.RequeueAfter)
+ }
+ }
+ annotationOption := cmp.Transformer("NormalizeAnnotations", func(m map[string]string) map[string]string {
+ normalized := map[string]string{}
+ for k, v := range m {
+ if k == fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation {
+ // Normalize the next-resource-snapshot-candidate detection time annotation to a fixed value for comparison.
+ if _, err := time.Parse(time.RFC3339, v); err != nil { + normalized[k] = "" + } + normalized[k] = nowToString + } else { + normalized[k] = v + } + } + return normalized + }) + options = append(options, sortClusterResourceSnapshotOption, annotationOption) + gotSnapshot, ok := got.(*fleetv1beta1.ClusterResourceSnapshot) + if !ok { + t.Fatalf("expected *fleetv1beta1.ClusterResourceSnapshot, got %T", got) + } + if diff := cmp.Diff(tc.wantResourceSnapshots[tc.wantLatestSnapshotIndex], *gotSnapshot, options...); diff != "" { + t.Errorf("GetOrCreateResourceSnapshot() mismatch (-want, +got):\n%s", diff) + } + clusterResourceSnapshotList := &fleetv1beta1.ClusterResourceSnapshotList{} + if err := fakeClient.List(ctx, clusterResourceSnapshotList); err != nil { + t.Fatalf("clusterResourceSnapshot List() got error %v, want no error", err) + } + if diff := cmp.Diff(tc.wantResourceSnapshots, clusterResourceSnapshotList.Items, options...); diff != "" { + t.Errorf("clusterResourceSnapshot List() mismatch (-want, +got):\n%s", diff) + } + }) + } +} + +func TestGetOrCreateClusterResourceSnapshot_failure(t *testing.T) { + selectedResources := []fleetv1beta1.ResourceContent{ + *resource.ServiceResourceContentForTest(t), + } + resourceSnapshotSpecA := &fleetv1beta1.ResourceSnapshotSpec{ + SelectedResources: selectedResources, + } + tests := []struct { + name string + resourceSnapshots []fleetv1beta1.ClusterResourceSnapshot + }{ + { + // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. 
+ name: "existing active resource snapshot does not have resourceIndex label", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + }, + }, + }, + }, + }, + { + // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. + name: "existing active resource snapshot does not have hash annotation", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + fleetv1beta1.ResourceIndexLabel: "0", + }, + }, + }, + }, + }, + { + // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. + name: "no active resource snapshot exists and resourceSnapshot with invalid resourceIndex label", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: testCRPName, + fleetv1beta1.ResourceIndexLabel: "abc", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + }, + }, + }, + }, + }, + { + // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. 
+ name: "no active resource snapshot exists and multiple resourceSnapshots with invalid resourceIndex label", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: testCRPName, + fleetv1beta1.ResourceIndexLabel: "abc", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: testCRPName, + fleetv1beta1.ResourceIndexLabel: "abc", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + }, + }, + }, + }, + }, + { + // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. + name: "no active resource snapshot exists and multiple resourceSnapshots with invalid subindex annotation", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: testCRPName, + fleetv1beta1.ResourceIndexLabel: "0", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "0", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: 
"ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "abc", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: testCRPName, + fleetv1beta1.ResourceIndexLabel: "1", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 1, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "abc", + }, + }, + }, + }, + }, + { + // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. 
+ name: "no active resource snapshot exists and multiple resourceSnapshots with invalid subindex (<0) annotation", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: testCRPName, + fleetv1beta1.ResourceIndexLabel: "0", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "0", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 0, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "-1", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.PlacementTrackingLabel: testCRPName, + fleetv1beta1.ResourceIndexLabel: "1", + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "abc", + fleetv1beta1.NumberOfResourceSnapshotsAnnotation: "2", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameWithSubindexFmt, testCRPName, 1, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + 
Annotations: map[string]string{ + fleetv1beta1.SubindexOfResourceSnapshotAnnotation: "-1", + }, + }, + }, + }, + }, + { + // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterResourceSnapshot. + name: "multiple active resource snapshot exist", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "0", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hashA", + }, + }, + Spec: *resourceSnapshotSpecA, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 1), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "1", + fleetv1beta1.IsLatestSnapshotLabel: "true", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hashA", + }, + }, + Spec: *resourceSnapshotSpecA, + }, + }, + }, + { + // Should never hit this case unless there is a bug in the controller or customers manually modify the clusterPolicySnapshot. 
+ name: "no active resource snapshot exists and resourceSnapshot with invalid resourceIndex label (negative value)", + resourceSnapshots: []fleetv1beta1.ClusterResourceSnapshot{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf(fleetv1beta1.ResourceSnapshotNameFmt, testCRPName, 0), + Labels: map[string]string{ + fleetv1beta1.ResourceIndexLabel: "-12", + fleetv1beta1.PlacementTrackingLabel: testCRPName, + }, + OwnerReferences: []metav1.OwnerReference{ + { + Name: testCRPName, + BlockOwnerDeletion: ptr.To(true), + Controller: ptr.To(true), + APIVersion: fleetAPIVersion, + Kind: "ClusterResourcePlacement", + }, + }, + Annotations: map[string]string{ + fleetv1beta1.ResourceGroupHashAnnotation: "hashA", + }, + }, + Spec: *resourceSnapshotSpecA, + }, + }, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + crp := clusterResourcePlacementForTest() + objects := []client.Object{crp} + for i := range tc.resourceSnapshots { + objects = append(objects, &tc.resourceSnapshots[i]) + } + scheme := serviceScheme(t) + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). 
+ Build() + resolver := NewResourceSnapshotResolver(fakeClient, scheme) + res, _, err := resolver.GetOrCreateResourceSnapshot(ctx, crp, 0, resourceSnapshotSpecA, 1) + if err == nil { // if error is nil + t.Fatal("GetOrCreateClusterResourceSnapshot() = nil, want err") + } + if res.RequeueAfter > 0 { + t.Fatal("GetOrCreateClusterResourceSnapshot() requeue = true, want false") + } + if !errors.Is(err, ErrUnexpectedBehavior) { + t.Errorf("GetOrCreateClusterResourceSnapshot() got %v, want %v type", err, ErrUnexpectedBehavior) + } + }) + } +} + +func TestShouldCreateNewResourceSnapshotNow(t *testing.T) { + now := time.Now() + + cases := []struct { + name string + creationInterval time.Duration + collectionDuration time.Duration + creationTime time.Time + annotationValue string + wantAnnoation bool + wantRequeue ctrl.Result + }{ + { + name: "ResourceSnapshotCreationMinimumInterval and ResourceChangesCollectionDuration are 0", + creationInterval: 0, + collectionDuration: 0, + wantRequeue: ctrl.Result{Requeue: false}, + }, + { + name: "ResourceSnapshotCreationMinimumInterval is 0", + creationInterval: 0, + collectionDuration: 30 * time.Second, + annotationValue: now.Add(-10 * time.Second).Format(time.RFC3339), + wantAnnoation: true, + wantRequeue: ctrl.Result{RequeueAfter: 20 * time.Second}, + }, + { + name: "ResourceChangesCollectionDuration is 0", + creationInterval: 300 * time.Second, + collectionDuration: 0, + creationTime: now.Add(-5 * time.Second), + // no annotation → sets it and requeues + annotationValue: "", + wantAnnoation: true, + wantRequeue: ctrl.Result{RequeueAfter: 295 * time.Second}, + }, + { + name: "next detection time (now) + collection duration < latest resource snapshot creation time + creation interval", + creationInterval: 300 * time.Second, + collectionDuration: 30 * time.Second, + creationTime: now.Add(-5 * time.Second), + // no annotation → sets it and requeues + annotationValue: "", + wantAnnoation: true, + wantRequeue: ctrl.Result{RequeueAfter: 
295 * time.Second}, + }, + { + name: "next detection time (annotation) + collection duration < latest resource snapshot creation time + creation interval", + creationInterval: 300 * time.Second, + collectionDuration: 30 * time.Second, + creationTime: now.Add(-10 * time.Second), + annotationValue: now.Add(-5 * time.Second).Format(time.RFC3339), + wantAnnoation: true, + wantRequeue: ctrl.Result{RequeueAfter: 290 * time.Second}, + }, + { + name: "last resource snapshot created long time before", + creationInterval: 60 * time.Second, + collectionDuration: 30 * time.Second, + creationTime: now.Add(-1 * time.Hour), + wantAnnoation: true, + wantRequeue: ctrl.Result{RequeueAfter: 30 * time.Second}, + }, + { + name: "next detection time (now) + collection duration >= latest resource snapshot creation time + creation interval", + creationInterval: 60 * time.Second, + collectionDuration: 60 * time.Second, + creationTime: now.Add(-40 * time.Second), + wantAnnoation: true, + wantRequeue: ctrl.Result{RequeueAfter: 60 * time.Second}, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + // initialize a snapshot with given creation time and annotation + snapshot := &fleetv1beta1.ClusterResourceSnapshot{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-snapshot", + CreationTimestamp: metav1.Time{Time: tc.creationTime}, + Annotations: map[string]string{}, + }, + } + if tc.annotationValue != "" { + snapshot.Annotations[fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation] = tc.annotationValue + } + + // use fake client seeded with the snapshot + scheme := serviceScheme(t) + client := fake.NewClientBuilder(). + WithScheme(scheme). + WithRuntimeObjects(snapshot.DeepCopy()). 
+ Build() + + resolver := NewResourceSnapshotResolver(client, nil) + resolver.Config = NewResourceSnapshotConfig(tc.creationInterval, // creation interval under test + tc.collectionDuration, // collection duration under test + ) + + ctx := context.Background() + if err := client.Get(ctx, types.NamespacedName{Name: snapshot.Name}, snapshot); err != nil { + t.Fatalf("Failed to get snapshot: %v", err) + } + got, err := resolver.shouldCreateNewResourceSnapshotNow(ctx, snapshot) + if err != nil { + t.Fatalf("shouldCreateNewResourceSnapshotNow() failed: %v", err) + } + cmpOptions := []cmp.Option{cmp.Comparer(func(d1, d2 time.Duration) bool { + if d1 == 0 { + return d2 == 0 // both are zero + } + return time.Duration.Abs(d1-d2) < 3*time.Second // allow up to 3 seconds of difference + })} + if !cmp.Equal(got, tc.wantRequeue, cmpOptions...) { + t.Errorf("shouldCreateNewResourceSnapshotNow() = %v, want %v", got, tc.wantRequeue) + } + if err := client.Get(ctx, types.NamespacedName{Name: snapshot.Name}, snapshot); err != nil { + t.Fatalf("failed to get snapshot after shouldCreateNewResourceSnapshotNow: %v", err) + } + if gotAnnotation := len(snapshot.Annotations[fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation]) != 0; tc.wantAnnoation != gotAnnotation { + t.Errorf("shouldCreateNewResourceSnapshotNow() = annotation %v, want %v", snapshot.Annotations[fleetv1beta1.NextResourceSnapshotCandidateDetectionTimeAnnotation], tc.wantAnnoation) + } + }) + } +} From fa1993555902e4c3271c3389635a905184111d84 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Wed, 18 Feb 2026 10:10:25 -0500 Subject: [PATCH 15/17] fix: fix race condition in envelope work object creation via deterministic naming (#450) --- pkg/controllers/workgenerator/envelope.go | 33 +++++++++++++++++-- .../workgenerator/envelope_test.go | 12 +++++++ test/e2e/enveloped_object_placement_test.go | 2 +- 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/pkg/controllers/workgenerator/envelope.go 
b/pkg/controllers/workgenerator/envelope.go index a7d2f5a8e..ef85895a8 100644 --- a/pkg/controllers/workgenerator/envelope.go +++ b/pkg/controllers/workgenerator/envelope.go @@ -18,13 +18,14 @@ package workgenerator import ( "context" + "crypto/sha256" + "encoding/hex" "fmt" "sort" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" @@ -81,12 +82,19 @@ func (r *Reconciler) createOrUpdateEnvelopeCRWorkObj( var work *fleetv1beta1.Work switch { case len(workList.Items) > 1: - // Multiple matching work objects found; this should never occur under normal conditions. + // Multiple matching work objects found; this should never occur under normal conditions + // with deterministic naming. Log details for investigation. wrappedErr := fmt.Errorf("%d work objects found for the same envelope %v, only one expected", len(workList.Items), envelopeReader.GetEnvelopeObjRef()) klog.ErrorS(wrappedErr, "Failed to create or update work object for envelope", "resourceBinding", klog.KObj(binding), "resourceSnapshot", klog.KObj(resourceSnapshot), "envelope", envelopeReader.GetEnvelopeObjRef()) + // Log the work object names to help debug + for i := range workList.Items { + klog.ErrorS(wrappedErr, "Duplicate work object found", + "work", klog.KObj(&workList.Items[i]), + "creationTimestamp", workList.Items[i].CreationTimestamp) + } return nil, controller.NewUnexpectedBehaviorError(wrappedErr) case len(workList.Items) == 1: klog.V(2).InfoS("Found existing work object for the envelope; updating it", @@ -196,7 +204,11 @@ func buildNewWorkForEnvelopeCR( manifests []fleetv1beta1.Manifest, resourceOverrideSnapshotHash, clusterResourceOverrideSnapshotHash string, ) *fleetv1beta1.Work { - workName := fmt.Sprintf(fleetv1beta1.WorkNameWithEnvelopeCRFmt, workNamePrefix, uuid.NewUUID()) + // Generate a deterministic work name based on the envelope 
identity to prevent duplicate work objects + // from being created by concurrent reconciliations. The name is stable across reconciliations for + // the same envelope, allowing Kubernetes' built-in duplicate prevention to work correctly. + envelopeIdentifier := generateEnvelopeIdentifier(envelopeReader) + workName := fmt.Sprintf(fleetv1beta1.WorkNameWithEnvelopeCRFmt, workNamePrefix, envelopeIdentifier) workNamespace := fmt.Sprintf(utils.NamespaceNameFormat, resourceBinding.GetBindingSpec().TargetCluster) // Create the labels map @@ -234,3 +246,18 @@ func buildNewWorkForEnvelopeCR( }, } } + +// generateEnvelopeIdentifier generates a stable, deterministic identifier for an envelope. +// This identifier is used in the work name to ensure that the same envelope always produces +// the same work name, enabling Kubernetes' atomic create operations to prevent duplicates. +func generateEnvelopeIdentifier(envelopeReader fleetv1beta1.EnvelopeReader) string { + // Create a stable identifier based on envelope type, name, and namespace. + // For cluster-scoped envelopes, namespace is empty, so we include the type to ensure uniqueness. 
+ identifier := fmt.Sprintf("%s.%s.%s", envelopeReader.GetEnvelopeType(), envelopeReader.GetNamespace(), envelopeReader.GetName()) + + // Use SHA256 hash to create a deterministic identifier + hash := sha256.Sum256([]byte(identifier)) + // Take first 8 characters of the hex-encoded hash to keep work names reasonably short + // while maintaining uniqueness (8 hex chars = 4 bytes = 2^32 combinations, sufficient for envelope uniqueness) + return hex.EncodeToString(hash[:])[:8] +} diff --git a/pkg/controllers/workgenerator/envelope_test.go b/pkg/controllers/workgenerator/envelope_test.go index 310553777..c8b112a53 100644 --- a/pkg/controllers/workgenerator/envelope_test.go +++ b/pkg/controllers/workgenerator/envelope_test.go @@ -483,6 +483,18 @@ func TestCreateOrUpdateEnvelopeCRWorkObj(t *testing.T) { want: nil, wantErr: true, }, + { + name: "two existing works should result in error", + envelopeReader: resourceEnvelope, + resourceOverrideSnapshotHash: "new-resource-hash", + clusterResourceOverrideSnapshotHash: "new-cluster-resource-hash", + existingObjects: func() []client.Object { + existingWork1 := existingWork.DeepCopy() + existingWork1.Name = "test-work-1" + return []client.Object{existingWork, existingWork1} + }(), + wantErr: true, + }, } for _, tt := range tests { diff --git a/test/e2e/enveloped_object_placement_test.go b/test/e2e/enveloped_object_placement_test.go index b52451958..6ba2edcac 100644 --- a/test/e2e/enveloped_object_placement_test.go +++ b/test/e2e/enveloped_object_placement_test.go @@ -140,7 +140,7 @@ var _ = Describe("placing wrapped resources using a CRP", func() { for idx := range allMemberClusters { memberCluster := allMemberClusters[idx] workResourcesPlacedActual := checkAllResourcesPlacement(memberCluster) - Eventually(workResourcesPlacedActual, eventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) + Eventually(workResourcesPlacedActual, 
workloadEventuallyDuration, eventuallyInterval).Should(Succeed(), "Failed to place work resources on member cluster %s", memberCluster.ClusterName) } }) From d66cbf74aeb4ee0bbcc6b63e90ed7a19ddb76a64 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 19 Feb 2026 23:34:43 +0800 Subject: [PATCH 16/17] Minor fixes Signed-off-by: michaelawyu --- docker/crd-installer.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/crd-installer.Dockerfile b/docker/crd-installer.Dockerfile index 07d8c6a6c..cf0bfd154 100644 --- a/docker/crd-installer.Dockerfile +++ b/docker/crd-installer.Dockerfile @@ -1,5 +1,5 @@ # Build the crdinstaller binary -FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.12 AS builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:1.24.13 AS builder ARG GOOS=linux ARG GOARCH=amd64 From 71e152d0a7d86140762475a1f9c951dd866885b1 Mon Sep 17 00:00:00 2001 From: michaelawyu Date: Thu, 19 Feb 2026 23:46:07 +0800 Subject: [PATCH 17/17] Minor fixes Signed-off-by: michaelawyu --- .github/workflows/ci.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5b15723b0..d40015701 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -31,8 +31,7 @@ jobs: concurrent_skipping: false unit-and-integration-tests: - runs-on: - labels: oracle-vm-16cpu-64gb-x86-64 + runs-on: ubuntu-latest needs: detect-noop if: needs.detect-noop.outputs.noop != 'true' steps: