diff --git a/.dockerignore b/.dockerignore index a621a1e50..cafbede61 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,7 +1,6 @@ .git .idea test -vendor build/_output/bin/splunk-operator-local splunk-operator-*.yaml splunk-operator-*.tar.gz diff --git a/.gitignore b/.gitignore index 4846768ad..30ba3b8d8 100644 --- a/.gitignore +++ b/.gitignore @@ -99,4 +99,5 @@ bundle_*/ test/secret/*.log kubeconfig .devcontainer/devcontainer.json -kuttl-artifacts/* \ No newline at end of file +kuttl-artifacts/* +.tool-versions diff --git a/PROJECT b/PROJECT index 62abf2007..34fb3d9b2 100644 --- a/PROJECT +++ b/PROJECT @@ -1,3 +1,7 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. +# More info: https://book.kubebuilder.io/reference/project-config.html domain: splunk.com layout: - go.kubebuilder.io/v4 @@ -109,4 +113,31 @@ resources: kind: LicenseManager path: github.com/splunk/splunk-operator/api/v4 version: v4 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: splunk.com + group: enterprise + kind: PostgresCluster + path: github.com/splunk/splunk-operator/api/v4 + version: v4 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: splunk.com + group: enterprise + kind: PostgresClusterClass + path: github.com/splunk/splunk-operator/api/v4 + version: v4 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: splunk.com + group: enterprise + kind: PostgresDatabase + path: github.com/splunk/splunk-operator/api/v4 + version: v4 version: "3" diff --git a/api/v4/postgrescluster_types.go b/api/v4/postgrescluster_types.go new file mode 100644 index 000000000..4464eaf1b --- /dev/null +++ b/api/v4/postgrescluster_types.go @@ -0,0 +1,210 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ManagedRole represents a PostgreSQL role to be created and managed in the cluster. +type ManagedRole struct { + // Name of the role/user to create. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // PasswordSecretRef references a Secret containing the password for this role. + // The Secret should have a key "password" with the password value. + // +optional + PasswordSecretRef *corev1.LocalObjectReference `json:"passwordSecretRef,omitempty"` + + // Ensure controls whether the role should exist (present) or not (absent). + // +kubebuilder:validation:Enum=present;absent + // +kubebuilder:default=present + Ensure string `json:"ensure,omitempty"` +} + +// PostgresClusterSpec defines the desired state of PostgresCluster. +// Validation rules ensure immutability of Class, and that Storage and PostgresVersion can only be set once and cannot be removed or downgraded. +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.postgresVersion) || (has(self.postgresVersion) && int(self.postgresVersion.split('.')[0]) >= int(oldSelf.postgresVersion.split('.')[0]))",messageExpression="!has(self.postgresVersion) ? 
'postgresVersion cannot be removed once set (was: ' + oldSelf.postgresVersion + ')' : 'postgresVersion major version cannot be downgraded (from: ' + oldSelf.postgresVersion + ', to: ' + self.postgresVersion + ')'" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.storage) || (has(self.storage) && quantity(self.storage).compareTo(quantity(oldSelf.storage)) >= 0)",messageExpression="!has(self.storage) ? 'storage cannot be removed once set (was: ' + string(oldSelf.storage) + ')' : 'storage size cannot be decreased (from: ' + string(oldSelf.storage) + ', to: ' + string(self.storage) + ')'" +// +kubebuilder:validation:XValidation:rule="!has(self.connectionPoolerConfig)",message="connectionPoolerConfig cannot be overridden on PostgresCluster" +type PostgresClusterSpec struct { + // This field is IMMUTABLE after creation. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="class is immutable" + Class string `json:"class"` + + // Storage overrides the storage size from ClusterClass. + // Example: "5Gi" + // +optional + Storage *resource.Quantity `json:"storage,omitempty"` + + // Instances overrides the number of PostgreSQL instances from ClusterClass. + // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=10 + Instances *int32 `json:"instances,omitempty"` + + // PostgresVersion is the PostgreSQL version (major or major.minor). + // Examples: "18" (latest 18.x), "18.1" (specific minor), "17", "16" + // +kubebuilder:validation:Pattern=`^[0-9]+(\.[0-9]+)?$` + // +optional + PostgresVersion *string `json:"postgresVersion,omitempty"` + + // Resources overrides CPU/memory resources from ClusterClass. + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + + // PostgreSQL overrides PostgreSQL engine parameters from ClusterClass. + // Maps to postgresql.conf settings. + // Default empty map prevents panic. 
+ // Example: {"shared_buffers": "128MB", "log_min_duration_statement": "500ms"} + // +optional + // +kubebuilder:default={} + PostgreSQLConfig map[string]string `json:"postgresqlConfig,omitempty"` + + // PgHBA contains pg_hba.conf host-based authentication rules. + // Defines client authentication and connection security (cluster-wide). + // Maps to pg_hba.conf settings. + // Default empty array prevents panic. + // Example: ["hostssl all all 0.0.0.0/0 scram-sha-256"] + // +optional + // +kubebuilder:default={} + PgHBA []string `json:"pgHBA,omitempty"` + + // ConnectionPoolerEnabled controls whether PgBouncer connection pooling is deployed for this cluster. + // When set, takes precedence over the class-level connectionPoolerEnabled value. + // +kubebuilder:default=false + // +optional + ConnectionPoolerEnabled *bool `json:"connectionPoolerEnabled,omitempty"` + + // Only takes effect when connection pooling is enabled. + // +optional + ConnectionPoolerConfig *ConnectionPoolerConfig `json:"connectionPoolerConfig,omitempty"` + + // ManagedRoles contains PostgreSQL roles that should be created in the cluster. + // This field supports Server-Side Apply with per-role granularity, allowing + // multiple PostgresDatabase controllers to manage different roles independently. + // +optional + // +listType=map + // +listMapKey=name + ManagedRoles []ManagedRole `json:"managedRoles,omitempty"` + + // ClusterDeletionPolicy controls the deletion behavior of the underlying CNPG Cluster when the PostgresCluster is deleted. + // +kubebuilder:validation:Enum=Delete;Retain + // +kubebuilder:default=Retain + // +optional + ClusterDeletionPolicy string `json:"clusterDeletionPolicy,omitempty"` +} + +// PostgresClusterResources defines references to Kubernetes resources related to the PostgresCluster, such as ConfigMaps and Secrets. +type PostgresClusterResources struct { + // ConfigMapRef references the ConfigMap with connection endpoints. 
+ // Contains: CLUSTER_ENDPOINTS, POOLER_ENDPOINTS (if connection pooler enabled) + // +optional + ConfigMapRef *corev1.LocalObjectReference `json:"configMapRef,omitempty"` + + // SecretRef references the Secret with superuser credentials. + // Contains: passwords for superuser + // +optional + SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"` +} + +// PostgresClusterStatus defines the observed state of PostgresCluster. +type PostgresClusterStatus struct { + // Phase represents the current phase of the PostgresCluster. + // Values: "Pending", "Provisioning", "Failed", "Ready", "Deleting" + // +optional + Phase string `json:"phase,omitempty"` + + // Conditions represent the latest available observations of the PostgresCluster's state. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // ProvisionerRef contains reference to the provisioner resource managing this PostgresCluster. + // Right now, only CNPG is supported. + // +optional + ProvisionerRef *corev1.ObjectReference `json:"provisionerRef,omitempty"` + + // ConnectionPoolerStatus contains the observed state of the connection pooler. + // Only populated when connection pooler is enabled in the PostgresClusterClass. + // +optional + ConnectionPoolerStatus *ConnectionPoolerStatus `json:"connectionPoolerStatus,omitempty"` + + // ManagedRolesStatus tracks the reconciliation status of managed roles. + // +optional + ManagedRolesStatus *ManagedRolesStatus `json:"managedRolesStatus,omitempty"` + + // Resources contains references to related Kubernetes resources like ConfigMaps and Secrets. + // +optional + Resources *PostgresClusterResources `json:"resources,omitempty"` +} + +// ManagedRolesStatus tracks the state of managed PostgreSQL roles. +type ManagedRolesStatus struct { + // Reconciled contains roles that have been successfully created and are ready. 
+ // +optional + Reconciled []string `json:"reconciled,omitempty"` + + // Pending contains roles that are being created but not yet ready. + // +optional + Pending []string `json:"pending,omitempty"` + + // Failed contains roles that failed to reconcile with error messages. + // +optional + Failed map[string]string `json:"failed,omitempty"` +} + +// ConnectionPoolerStatus contains the observed state of the connection pooler. +type ConnectionPoolerStatus struct { + // Enabled indicates whether pooler is active for this cluster. + Enabled bool `json:"enabled"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced +// +kubebuilder:printcolumn:name="Class",type=string,JSONPath=`.spec.class` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// PostgresCluster is the Schema for the postgresclusters API. +type PostgresCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PostgresClusterSpec `json:"spec,omitempty"` + Status PostgresClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PostgresClusterList contains a list of PostgresCluster. +type PostgresClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PostgresCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PostgresCluster{}, &PostgresClusterList{}) +} diff --git a/api/v4/postgresclusterclass_types.go b/api/v4/postgresclusterclass_types.go new file mode 100644 index 000000000..92d4a2b24 --- /dev/null +++ b/api/v4/postgresclusterclass_types.go @@ -0,0 +1,203 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:validation:XValidation:rule="!has(self.config) || !has(self.config.connectionPoolerEnabled) || !self.config.connectionPoolerEnabled || (has(self.cnpg) && has(self.cnpg.connectionPooler))",message="cnpg.connectionPooler must be set when config.connectionPoolerEnabled is true" +// PostgresClusterClassSpec defines the desired state of PostgresClusterClass. +// PostgresClusterClass is immutable after creation - it serves as a template for Cluster CRs. +type PostgresClusterClassSpec struct { + // Provisioner identifies which database provisioner to use. + // Currently supported: "postgresql.cnpg.io" (CloudNativePG) + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=postgresql.cnpg.io + Provisioner string `json:"provisioner"` + + // PostgresClusterConfig contains cluster-level configuration. + // These settings apply to PostgresCluster infrastructure. + // Can be overridden in PostgresCluster CR. + // +kubebuilder:default={} + // +optional + Config PosgresClusterClassConfig `json:"config,omitempty"` + + // CNPG contains CloudNativePG-specific configuration and policies. + // Only used when Provisioner is "postgresql.cnpg.io" + // These settings CANNOT be overridden in PostgresCluster CR (platform policy). + // +optional + CNPG *CNPGConfig `json:"cnpg,omitempty"` +} + +// PosgresClusterClassConfig contains provider-agnostic cluster configuration. 
+// These fields define PostgresCluster infrastructure and can be overridden in PostgresCluster CR. +type PosgresClusterClassConfig struct { + // Instances is the number of database instances (1 primary + N replicas). + // Single instance (1) is suitable for development. + // High availability requires at least 3 instances (1 primary + 2 replicas). + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=10 + // +kubebuilder:default=1 + // +optional + Instances *int32 `json:"instances,omitempty"` + + // Storage is the size of persistent volume for each instance. + // Cannot be decreased after cluster creation (PostgreSQL limitation). + // Recommended minimum: 10Gi for production viability. + // Example: "50Gi", "100Gi", "1Ti" + // +kubebuilder:default="50Gi" + // +optional + Storage *resource.Quantity `json:"storage,omitempty"` + + // PostgresVersion is the PostgreSQL version (major or major.minor). + // Examples: "18" (latest 18.x), "18.1" (specific minor), "17", "16" + // +kubebuilder:validation:Pattern=`^[0-9]+(\.[0-9]+)?$` + // +kubebuilder:default="18" + // +optional + PostgresVersion *string `json:"postgresVersion,omitempty"` + + // Resources defines CPU and memory requests/limits per instance. + // All instances in the cluster have the same resources. + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + + // PostgreSQLConfig contains PostgreSQL engine configuration parameters. + // Maps to postgresql.conf settings (cluster-wide). + // Example: {"max_connections": "200", "shared_buffers": "2GB"} + // +optional + PostgreSQLConfig map[string]string `json:"postgresqlConfig,omitempty"` + + // PgHBA contains pg_hba.conf host-based authentication rules. + // Defines client authentication and connection security (cluster-wide). 
+ // Example: ["hostssl all all 0.0.0.0/0 scram-sha-256"] + // +optional + PgHBA []string `json:"pgHBA,omitempty"` + + // ConnectionPoolerEnabled controls whether PgBouncer connection pooling is deployed. + // When true, creates RW and RO pooler deployments for clusters using this class. + // Can be overridden in PostgresCluster CR. + // +kubebuilder:default=false + // +optional + ConnectionPoolerEnabled *bool `json:"connectionPoolerEnabled,omitempty"` +} + +// ConnectionPoolerMode defines the PgBouncer connection pooling strategy. +// +kubebuilder:validation:Enum=session;transaction;statement +type ConnectionPoolerMode string + +const ( + // ConnectionPoolerModeSession assigns a connection for the entire client session (most compatible). + ConnectionPoolerModeSession ConnectionPoolerMode = "session" + + // ConnectionPoolerModeTransaction returns the connection after each transaction (recommended). + ConnectionPoolerModeTransaction ConnectionPoolerMode = "transaction" + + // ConnectionPoolerModeStatement returns the connection after each statement (limited compatibility). + ConnectionPoolerModeStatement ConnectionPoolerMode = "statement" +) + +// ConnectionPoolerConfig defines PgBouncer connection pooler configuration. +// When enabled, creates RW and RO pooler deployments for clusters using this class. +type ConnectionPoolerConfig struct { + // Instances is the number of PgBouncer pod replicas. + // Higher values provide better availability and load distribution. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=10 + // +kubebuilder:default=3 + // +optional + Instances *int32 `json:"instances,omitempty"` + + // Mode defines the connection pooling strategy. + // +kubebuilder:default="transaction" + // +optional + Mode *ConnectionPoolerMode `json:"mode,omitempty"` + + // Config contains PgBouncer configuration parameters. + // Passed directly to CNPG Pooler spec.pgbouncer.parameters. 
+ // See: https://cloudnative-pg.io/docs/1.28/connection_pooling/#pgbouncer-configuration-options + // +optional + Config map[string]string `json:"config,omitempty"` +} + +// CNPGConfig contains CloudNativePG-specific configuration. +// These fields control CNPG operator behavior and enforce platform policies. +// Cannot be overridden in Cluster CR. +type CNPGConfig struct { + // PrimaryUpdateMethod determines how the primary instance is updated. + // "restart" - tolerate brief downtime (suitable for development) + // "switchover" - minimal downtime via automated failover (production-grade) + // + // NOTE: When using "switchover", ensure clusterConfig.instances > 1. + // Switchover requires at least one replica to fail over to. + // +kubebuilder:validation:Enum=restart;switchover + // +kubebuilder:default=switchover + // +optional + PrimaryUpdateMethod string `json:"primaryUpdateMethod,omitempty"` + + // ConnectionPooler contains PgBouncer connection pooler configuration. + // When enabled, creates RW and RO pooler deployments for clusters using this class. + // +optional + ConnectionPooler *ConnectionPoolerConfig `json:"connectionPooler,omitempty"` +} + +// PostgresClusterClassStatus defines the observed state of PostgresClusterClass. +type PostgresClusterClassStatus struct { + // Conditions represent the latest available observations of the PostgresClusterClass state. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // Phase represents the current phase of the PostgresClusterClass. 
+ // Valid phases: "Ready", "Invalid" + // +optional + Phase string `json:"phase,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:printcolumn:name="Provisioner",type=string,JSONPath=`.spec.provisioner` +// +kubebuilder:printcolumn:name="Instances",type=integer,JSONPath=`.spec.postgresClusterConfig.instances` +// +kubebuilder:printcolumn:name="Storage",type=string,JSONPath=`.spec.postgresClusterConfig.storage` +// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.postgresClusterConfig.postgresVersion` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// PostgresClusterClass is the Schema for the postgresclusterclasses API. +// PostgresClusterClass defines a reusable template and policy for postgres cluster provisioning. +type PostgresClusterClass struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PostgresClusterClassSpec `json:"spec,omitempty"` + Status PostgresClusterClassStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PostgresClusterClassList contains a list of PostgresClusterClass. +type PostgresClusterClassList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PostgresClusterClass `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PostgresClusterClass{}, &PostgresClusterClassList{}) +} diff --git a/api/v4/postgresdatabase_types.go b/api/v4/postgresdatabase_types.go new file mode 100644 index 000000000..4a4a280f0 --- /dev/null +++ b/api/v4/postgresdatabase_types.go @@ -0,0 +1,95 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PostgresDatabaseSpec defines the desired state of PostgresDatabase. +// +kubebuilder:validation:XValidation:rule="self.clusterRef == oldSelf.clusterRef",message="clusterRef is immutable" +type PostgresDatabaseSpec struct { + // +kubebuilder:validation:Required + ClusterRef corev1.LocalObjectReference `json:"clusterRef"` + + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.filter(y, y.name == x.name).size() == 1)",message="database names must be unique" + Databases []DatabaseDefinition `json:"databases"` +} + +type DatabaseDefinition struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=30 + Name string `json:"name"` + Extensions []string `json:"extensions,omitempty"` + // +kubebuilder:validation:Enum=Delete;Retain + // +kubebuilder:default=Delete + DeletionPolicy string `json:"deletionPolicy,omitempty"` +} + +type DatabaseInfo struct { + Name string `json:"name"` + Ready bool `json:"ready"` + DatabaseRef *corev1.LocalObjectReference `json:"databaseRef,omitempty"` + AdminUserSecretRef *corev1.LocalObjectReference `json:"adminUserSecretRef,omitempty"` + RWUserSecretRef *corev1.LocalObjectReference `json:"rwUserSecretRef,omitempty"` + ConfigMapRef *corev1.LocalObjectReference `json:"configMap,omitempty"` +} + +// PostgresDatabaseStatus defines the observed state of PostgresDatabase. 
+type PostgresDatabaseStatus struct { + // +optional + Phase string `json:"phase,omitempty"` + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + // +optional + Databases []DatabaseInfo `json:"databases,omitempty"` + // ObservedGeneration represents the .metadata.generation that the status was set based upon. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Cluster",type=string,JSONPath=`.spec.clusterRef.name` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// PostgresDatabase is the Schema for the postgresdatabases API. +type PostgresDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PostgresDatabaseSpec `json:"spec,omitempty"` + Status PostgresDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PostgresDatabaseList contains a list of PostgresDatabase. +type PostgresDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PostgresDatabase `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PostgresDatabase{}, &PostgresDatabaseList{}) +} diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index 93e988463..32f7bc429 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ package v4 import ( "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -180,6 +181,26 @@ func (in *BundlePushTracker) DeepCopy() *BundlePushTracker { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CNPGConfig) DeepCopyInto(out *CNPGConfig) { + *out = *in + if in.ConnectionPooler != nil { + in, out := &in.ConnectionPooler, &out.ConnectionPooler + *out = new(ConnectionPoolerConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNPGConfig. +func (in *CNPGConfig) DeepCopy() *CNPGConfig { + if in == nil { + return nil + } + out := new(CNPGConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CacheManagerSpec) DeepCopyInto(out *CacheManagerSpec) { *out = *in @@ -355,6 +376,108 @@ func (in *CommonSplunkSpec) DeepCopy() *CommonSplunkSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolerConfig) DeepCopyInto(out *ConnectionPoolerConfig) { + *out = *in + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(int32) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(ConnectionPoolerMode) + **out = **in + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolerConfig. +func (in *ConnectionPoolerConfig) DeepCopy() *ConnectionPoolerConfig { + if in == nil { + return nil + } + out := new(ConnectionPoolerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolerStatus) DeepCopyInto(out *ConnectionPoolerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolerStatus. 
+func (in *ConnectionPoolerStatus) DeepCopy() *ConnectionPoolerStatus { + if in == nil { + return nil + } + out := new(ConnectionPoolerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseDefinition) DeepCopyInto(out *DatabaseDefinition) { + *out = *in + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseDefinition. +func (in *DatabaseDefinition) DeepCopy() *DatabaseDefinition { + if in == nil { + return nil + } + out := new(DatabaseDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseInfo) DeepCopyInto(out *DatabaseInfo) { + *out = *in + if in.DatabaseRef != nil { + in, out := &in.DatabaseRef, &out.DatabaseRef + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.AdminUserSecretRef != nil { + in, out := &in.AdminUserSecretRef, &out.AdminUserSecretRef + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.RWUserSecretRef != nil { + in, out := &in.RWUserSecretRef, &out.RWUserSecretRef + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(v1.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseInfo. +func (in *DatabaseInfo) DeepCopy() *DatabaseInfo { + if in == nil { + return nil + } + out := new(DatabaseInfo) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *EsDefaults) DeepCopyInto(out *EsDefaults) { *out = *in @@ -647,6 +770,58 @@ func (in *LicenseManagerStatus) DeepCopy() *LicenseManagerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedRole) DeepCopyInto(out *ManagedRole) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRole. +func (in *ManagedRole) DeepCopy() *ManagedRole { + if in == nil { + return nil + } + out := new(ManagedRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedRolesStatus) DeepCopyInto(out *ManagedRolesStatus) { + *out = *in + if in.Reconciled != nil { + in, out := &in.Reconciled, &out.Reconciled + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Pending != nil { + in, out := &in.Pending, &out.Pending + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Failed != nil { + in, out := &in.Failed, &out.Failed + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRolesStatus. +func (in *ManagedRolesStatus) DeepCopy() *ManagedRolesStatus { + if in == nil { + return nil + } + out := new(ManagedRolesStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MonitoringConsole) DeepCopyInto(out *MonitoringConsole) { *out = *in @@ -762,6 +937,461 @@ func (in *PhaseInfo) DeepCopy() *PhaseInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PosgresClusterClassConfig) DeepCopyInto(out *PosgresClusterClassConfig) { + *out = *in + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(int32) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + x := (*in).DeepCopy() + *out = &x + } + if in.PostgresVersion != nil { + in, out := &in.PostgresVersion, &out.PostgresVersion + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.PostgreSQLConfig != nil { + in, out := &in.PostgreSQLConfig, &out.PostgreSQLConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PgHBA != nil { + in, out := &in.PgHBA, &out.PgHBA + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ConnectionPoolerEnabled != nil { + in, out := &in.ConnectionPoolerEnabled, &out.ConnectionPoolerEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PosgresClusterClassConfig. +func (in *PosgresClusterClassConfig) DeepCopy() *PosgresClusterClassConfig { + if in == nil { + return nil + } + out := new(PosgresClusterClassConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresCluster) DeepCopyInto(out *PostgresCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresCluster. +func (in *PostgresCluster) DeepCopy() *PostgresCluster { + if in == nil { + return nil + } + out := new(PostgresCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresClusterClass) DeepCopyInto(out *PostgresClusterClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterClass. +func (in *PostgresClusterClass) DeepCopy() *PostgresClusterClass { + if in == nil { + return nil + } + out := new(PostgresClusterClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresClusterClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresClusterClassList) DeepCopyInto(out *PostgresClusterClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PostgresClusterClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterClassList. +func (in *PostgresClusterClassList) DeepCopy() *PostgresClusterClassList { + if in == nil { + return nil + } + out := new(PostgresClusterClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresClusterClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresClusterClassSpec) DeepCopyInto(out *PostgresClusterClassSpec) { + *out = *in + in.Config.DeepCopyInto(&out.Config) + if in.CNPG != nil { + in, out := &in.CNPG, &out.CNPG + *out = new(CNPGConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterClassSpec. +func (in *PostgresClusterClassSpec) DeepCopy() *PostgresClusterClassSpec { + if in == nil { + return nil + } + out := new(PostgresClusterClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresClusterClassStatus) DeepCopyInto(out *PostgresClusterClassStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterClassStatus. +func (in *PostgresClusterClassStatus) DeepCopy() *PostgresClusterClassStatus { + if in == nil { + return nil + } + out := new(PostgresClusterClassStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresClusterList) DeepCopyInto(out *PostgresClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PostgresCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterList. +func (in *PostgresClusterList) DeepCopy() *PostgresClusterList { + if in == nil { + return nil + } + out := new(PostgresClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresClusterResources) DeepCopyInto(out *PostgresClusterResources) { + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(v1.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterResources. +func (in *PostgresClusterResources) DeepCopy() *PostgresClusterResources { + if in == nil { + return nil + } + out := new(PostgresClusterResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { + *out = *in + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + x := (*in).DeepCopy() + *out = &x + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(int32) + **out = **in + } + if in.PostgresVersion != nil { + in, out := &in.PostgresVersion, &out.PostgresVersion + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.PostgreSQLConfig != nil { + in, out := &in.PostgreSQLConfig, &out.PostgreSQLConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PgHBA != nil { + in, out := &in.PgHBA, &out.PgHBA + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ConnectionPoolerEnabled != nil { + in, out := &in.ConnectionPoolerEnabled, &out.ConnectionPoolerEnabled + *out = new(bool) + **out = **in + } + if in.ConnectionPoolerConfig != nil { + in, out := &in.ConnectionPoolerConfig, &out.ConnectionPoolerConfig + *out = new(ConnectionPoolerConfig) + (*in).DeepCopyInto(*out) + } + if 
in.ManagedRoles != nil { + in, out := &in.ManagedRoles, &out.ManagedRoles + *out = make([]ManagedRole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterSpec. +func (in *PostgresClusterSpec) DeepCopy() *PostgresClusterSpec { + if in == nil { + return nil + } + out := new(PostgresClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresClusterStatus) DeepCopyInto(out *PostgresClusterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProvisionerRef != nil { + in, out := &in.ProvisionerRef, &out.ProvisionerRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.ConnectionPoolerStatus != nil { + in, out := &in.ConnectionPoolerStatus, &out.ConnectionPoolerStatus + *out = new(ConnectionPoolerStatus) + **out = **in + } + if in.ManagedRolesStatus != nil { + in, out := &in.ManagedRolesStatus, &out.ManagedRolesStatus + *out = new(ManagedRolesStatus) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(PostgresClusterResources) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterStatus. +func (in *PostgresClusterStatus) DeepCopy() *PostgresClusterStatus { + if in == nil { + return nil + } + out := new(PostgresClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresDatabase) DeepCopyInto(out *PostgresDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresDatabase. +func (in *PostgresDatabase) DeepCopy() *PostgresDatabase { + if in == nil { + return nil + } + out := new(PostgresDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresDatabaseList) DeepCopyInto(out *PostgresDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PostgresDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresDatabaseList. +func (in *PostgresDatabaseList) DeepCopy() *PostgresDatabaseList { + if in == nil { + return nil + } + out := new(PostgresDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresDatabaseSpec) DeepCopyInto(out *PostgresDatabaseSpec) { + *out = *in + out.ClusterRef = in.ClusterRef + if in.Databases != nil { + in, out := &in.Databases, &out.Databases + *out = make([]DatabaseDefinition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresDatabaseSpec. +func (in *PostgresDatabaseSpec) DeepCopy() *PostgresDatabaseSpec { + if in == nil { + return nil + } + out := new(PostgresDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresDatabaseStatus) DeepCopyInto(out *PostgresDatabaseStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Databases != nil { + in, out := &in.Databases, &out.Databases + *out = make([]DatabaseInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresDatabaseStatus. +func (in *PostgresDatabaseStatus) DeepCopy() *PostgresDatabaseStatus { + if in == nil { + return nil + } + out := new(PostgresDatabaseStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PremiumAppsProps) DeepCopyInto(out *PremiumAppsProps) { *out = *in diff --git a/bundle.Dockerfile b/bundle.Dockerfile index c16e98425..396f16e00 100644 --- a/bundle.Dockerfile +++ b/bundle.Dockerfile @@ -9,7 +9,7 @@ LABEL operators.operatorframework.io.bundle.channels.v1=stable LABEL operators.operatorframework.io.bundle.channel.default.v1: stable LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.39.0 LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 -LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3 +LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v4 # Labels for testing. LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 diff --git a/bundle/manifests/enterprise.splunk.com_databaseclasses.yaml b/bundle/manifests/enterprise.splunk.com_databaseclasses.yaml new file mode 100644 index 000000000..ce8cd63c8 --- /dev/null +++ b/bundle/manifests/enterprise.splunk.com_databaseclasses.yaml @@ -0,0 +1,62 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + labels: + name: splunk-operator + name: databaseclasses.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: DatabaseClass + listKind: DatabaseClassList + plural: databaseclasses + singular: databaseclass + scope: Namespaced + versions: + - name: v4 + schema: + openAPIV3Schema: + description: DatabaseClass is the Schema for the databaseclasses API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DatabaseClassSpec defines the desired state of DatabaseClass. + properties: + foo: + description: Foo is an example field of DatabaseClass. Edit databaseclass_types.go + to remove/update + type: string + type: object + status: + description: DatabaseClassStatus defines the observed state of DatabaseClass. + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/enterprise.splunk.com_databases.yaml b/bundle/manifests/enterprise.splunk.com_databases.yaml new file mode 100644 index 000000000..203cf3cd8 --- /dev/null +++ b/bundle/manifests/enterprise.splunk.com_databases.yaml @@ -0,0 +1,62 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + labels: + name: splunk-operator + name: databases.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: Database + listKind: DatabaseList + plural: databases + singular: database + scope: Namespaced + versions: + - name: v4 + schema: + openAPIV3Schema: + description: Database is the Schema for the databases API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DatabaseSpec defines the desired state of Database. + properties: + foo: + description: Foo is an example field of Database. Edit database_types.go + to remove/update + type: string + type: object + status: + description: DatabaseStatus defines the observed state of Database. + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/splunk-operator.clusterserviceversion.yaml b/bundle/manifests/splunk-operator.clusterserviceversion.yaml index 9ee619ae5..431179e7e 100644 --- a/bundle/manifests/splunk-operator.clusterserviceversion.yaml +++ b/bundle/manifests/splunk-operator.clusterserviceversion.yaml @@ -65,7 +65,31 @@ metadata: "metadata": { "name": "clustermanager-sample" }, - "spec": {} + "spec": null + }, + { + "apiVersion": "enterprise.splunk.com/v4", + "kind": "Database", + "metadata": { + "labels": { + "app.kubernetes.io/managed-by": "kustomize", + "app.kubernetes.io/name": "splunk-operator" + }, + "name": "database-sample" + }, + "spec": null + }, + { + "apiVersion": "enterprise.splunk.com/v4", + "kind": "DatabaseClass", + "metadata": { + "labels": { + "app.kubernetes.io/managed-by": "kustomize", + "app.kubernetes.io/name": "splunk-operator" + }, + "name": "databaseclass-sample" + }, + "spec": 
null }, { "apiVersion": "enterprise.splunk.com/v4", @@ -73,7 +97,7 @@ metadata: "metadata": { "name": "indexercluster-sample" }, - "spec": {} + "spec": null }, { "apiVersion": "enterprise.splunk.com/v4", @@ -81,7 +105,7 @@ metadata: "metadata": { "name": "licensemanager-sample" }, - "spec": {} + "spec": null }, { "apiVersion": "enterprise.splunk.com/v4", @@ -89,7 +113,7 @@ metadata: "metadata": { "name": "monitoringconsole-sample" }, - "spec": {} + "spec": null }, { "apiVersion": "enterprise.splunk.com/v4", @@ -97,7 +121,7 @@ metadata: "metadata": { "name": "searchheadcluster-sample" }, - "spec": {} + "spec": null }, { "apiVersion": "enterprise.splunk.com/v4", @@ -105,19 +129,19 @@ metadata: "metadata": { "name": "standalone-sample" }, - "spec": {} + "spec": null } ] capabilities: Seamless Upgrades categories: Big Data, Logging & Tracing, Monitoring, Security, AI/Machine Learning containerImage: splunk/splunk-operator@sha256:c4e0d314622699496f675760aad314520d050a66627fdf33e1e21fa28ca85d50 - createdAt: "2025-09-08T10:17:59Z" + createdAt: "2026-01-05T14:32:06Z" description: The Splunk Operator for Kubernetes enables you to quickly and easily deploy Splunk Enterprise on your choice of private or public cloud provider. The Operator simplifies scaling and management of Splunk Enterprise by automating administrative workflows using Kubernetes best practices. olm.properties: '[{"type": "olm.maxOpenShiftVersion", "value": "4.19"}]' - operators.operatorframework.io/builder: operator-sdk-v1.39.0 + operators.operatorframework.io/builder: operator-sdk-v1.42.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v4 repository: https://github.com/splunk/splunk-operator name: splunk-operator.v3.0.0 @@ -142,6 +166,16 @@ spec: - kind: ClusterMaster name: clustermasters.enterprise.splunk.com version: v2 + - description: DatabaseClass is the Schema for the databaseclasses API. 
+    displayName: Database Class
+    kind: DatabaseClass
+    name: databaseclasses.enterprise.splunk.com
+    version: v4
+  - description: Database is the Schema for the databases API.
+    displayName: Database
+    kind: Database
+    name: databases.enterprise.splunk.com
+    version: v4
   - description: IndexerCluster is the Schema for a Splunk Enterprise indexer cluster
     displayName: Indexer Cluster
     kind: IndexerCluster
@@ -275,6 +309,8 @@ spec:
           resources:
           - clustermanagers
           - clustermasters
+          - databaseclasses
+          - databases
           - indexerclusters
           - licensemanagers
           - licensemasters
@@ -294,6 +330,8 @@ spec:
           resources:
           - clustermanagers/finalizers
           - clustermasters/finalizers
+          - databaseclasses/finalizers
+          - databases/finalizers
           - indexerclusters/finalizers
           - licensemanagers/finalizers
           - licensemasters/finalizers
@@ -307,6 +345,8 @@ spec:
           resources:
           - clustermanagers/status
           - clustermasters/status
+          - databaseclasses/status
+          - databases/status
           - indexerclusters/status
           - licensemanagers/status
           - licensemasters/status
@@ -366,7 +406,7 @@ spec:
                   fieldRef:
                     fieldPath: metadata.annotations['olm.targetNamespaces']
             - name: RELATED_IMAGE_SPLUNK_ENTERPRISE
-              value: docker.io/splunk/splunk:10.0.0
+              value: docker.io/splunk/splunk:10.0.0
             - name: OPERATOR_NAME
               value: splunk-operator
             - name: SPLUNK_GENERAL_TERMS
@@ -375,7 +415,7 @@ spec:
               valueFrom:
                 fieldRef:
                   fieldPath: metadata.name
-            image: docker.io/splunk/splunk-operator:3.0.0
+            image: docker.io/splunk/splunk-operator:3.0.0
             imagePullPolicy: Always
             livenessProbe:
               httpGet:
@@ -488,7 +528,7 @@ spec:
     name: Splunk Inc.
      url: www.splunk.com
   relatedImages:
-  - image: docker.io/splunk/splunk:10.0.0
+  - image: docker.io/splunk/splunk:10.0.0
     name: splunk-enterprise
   replaces: splunk-operator.v2.8.1
   version: 3.0.0
diff --git a/bundle/metadata/annotations.yaml b/bundle/metadata/annotations.yaml
index 890fd61c7..87b0891ef 100644
--- a/bundle/metadata/annotations.yaml
+++ b/bundle/metadata/annotations.yaml
@@ -4,11 +4,11 @@ annotations:
   operators.operatorframework.io.bundle.manifests.v1: manifests/
   operators.operatorframework.io.bundle.metadata.v1: metadata/
   operators.operatorframework.io.bundle.package.v1: splunk-operator
-  operators.operatorframework.io.bundle.channels.v1: stable
-  operators.operatorframework.io.bundle.channel.default.v1: stable
-  operators.operatorframework.io.metrics.builder: operator-sdk-v1.31.0
+  operators.operatorframework.io.bundle.channels.v1: stable
+  operators.operatorframework.io.bundle.channel.default.v1: stable
+  operators.operatorframework.io.metrics.builder: operator-sdk-v1.42.0
   operators.operatorframework.io.metrics.mediatype.v1: metrics+v1
-  operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3
+  operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v4

   # Annotations for testing.
   operators.operatorframework.io.test.mediatype.v1: scorecard+v1
diff --git a/cmd/main.go b/cmd/main.go
index ec286826d..e696d9d27 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -23,10 +23,11 @@ import (
 	"os"
 	"time"
 
+	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
+
 	intController "github.com/splunk/splunk-operator/internal/controller"
 	"github.com/splunk/splunk-operator/internal/controller/debug"
 	"github.com/splunk/splunk-operator/pkg/config"
-	"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
 
 	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
 	// to ensure that exec-entrypoint and run can make use of them.
@@ -47,6 +48,8 @@ import (
 
 	enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3"
 	enterpriseApi "github.com/splunk/splunk-operator/api/v4"
+
+	cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
 	//+kubebuilder:scaffold:imports
 	//extapi "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 )
@@ -60,6 +63,7 @@ func init() {
 	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
 	utilruntime.Must(enterpriseApi.AddToScheme(scheme))
 	utilruntime.Must(enterpriseApiV3.AddToScheme(scheme))
+	utilruntime.Must(cnpgv1.AddToScheme(scheme))
 	//+kubebuilder:scaffold:scheme
 	//utilruntime.Must(extapi.AddToScheme(scheme))
 }
@@ -229,6 +233,24 @@ func main() {
 		setupLog.Error(err, "unable to create controller", "controller", "Standalone")
 		os.Exit(1)
 	}
+	// The Postgres reconcilers live in the already-imported internal/controller
+	// package (alias intController); reusing it avoids a second alias for the
+	// same import path and a duplicate AddToScheme of the v4 API group.
+	if err := (&intController.PostgresDatabaseReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "PostgresDatabase")
+		os.Exit(1)
+	}
+	if err := (&intController.PostgresClusterReconciler{
+		Client: mgr.GetClient(),
+		Scheme: mgr.GetScheme(),
+	}).SetupWithManager(mgr); err != nil {
+		setupLog.Error(err, "unable to create controller", "controller", "PostgresCluster")
+		os.Exit(1)
+	}
+
 	//+kubebuilder:scaffold:builder
 
 	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
diff --git a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml
index c393cdfdc..51097dfd1 100644
--- a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml
+++ b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml
@@ -354,7 +354,6 @@ spec:
                         pod labels will be ignored. The default value is empty.
                         The same key is forbidden to exist in both matchLabelKeys and labelSelector.
Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -369,7 +368,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -535,7 +533,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -550,7 +547,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -643,8 +639,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -713,7 +709,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -728,7 +723,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -894,7 +888,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -909,7 +902,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1274,7 +1266,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1332,6 +1326,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. 
+ Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1624,7 +1655,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2026,13 +2057,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. 
If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -2160,6 +2190,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2500,7 +2532,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2511,7 +2542,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2581,6 +2611,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2612,8 +2644,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2651,8 +2685,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. properties: readOnly: description: |- @@ -2671,8 +2707,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2724,6 +2761,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2833,8 +2872,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). 
+ storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3236,15 +3274,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3300,6 +3336,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3345,9 +3382,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. 
This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -3363,6 +3400,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3398,7 +3437,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3422,12 +3461,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. 
type: string path: description: |- @@ -3481,7 +3519,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3506,7 +3544,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3631,8 +3669,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3648,8 +3687,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. 
+ Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3922,6 +3964,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. 
+ + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. 
+ + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4014,8 +4161,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -4054,7 +4202,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -4126,8 +4274,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. 
properties: fsType: default: xfs @@ -4259,8 +4408,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -4305,8 +4455,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml index bfd9e330d..6dd79c1bb 100644 --- a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml +++ b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml @@ -350,7 +350,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -365,7 +364,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -531,7 +529,6 @@ spec: pod labels will be ignored. 
The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -546,7 +543,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -639,8 +635,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -709,7 +705,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -724,7 +719,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -890,7 +884,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -905,7 +898,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1270,7 +1262,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1328,6 +1322,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. 
+ + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1620,7 +1651,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2022,13 +2053,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. 
type: string type: description: |- @@ -2156,6 +2186,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2496,7 +2528,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2507,7 +2538,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2577,6 +2607,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2608,8 +2640,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. 
properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2647,8 +2681,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. properties: readOnly: description: |- @@ -2667,8 +2703,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2720,6 +2757,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2829,8 +2868,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3232,15 +3270,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3296,6 +3332,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3341,9 +3378,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. 
properties: datasetName: description: |- @@ -3359,6 +3396,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3394,7 +3433,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3418,12 +3457,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3477,7 +3515,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). 
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3502,7 +3540,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3627,8 +3665,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3644,8 +3683,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3918,6 +3960,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. 
+ + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. 
+ type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. 
+ format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4010,8 +4157,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -4050,7 +4198,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -4122,8 +4270,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -4255,8 +4404,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. 
properties: fsType: description: |- @@ -4301,8 +4451,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 5e30a273f..7f5f20efc 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -357,7 +357,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -372,7 +371,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -538,7 +536,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -553,7 +550,6 @@ spec: pod labels will be ignored. The default value is empty. 
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -646,8 +642,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -716,7 +712,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -731,7 +726,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -897,7 +891,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -912,7 +905,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1122,7 +1114,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1180,6 +1174,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. 
+ type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1477,7 +1508,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -1879,13 +1910,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -2013,6 +2043,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2236,7 +2268,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. 
All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2247,7 +2278,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2317,6 +2347,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2348,8 +2380,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2387,8 +2421,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -2407,8 +2443,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2460,6 +2497,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2569,8 +2608,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -2972,15 +3010,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3036,6 +3072,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3081,9 +3118,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -3099,6 +3136,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3134,7 +3173,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3158,12 +3197,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3217,7 +3255,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3242,7 +3280,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3367,8 +3405,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3384,8 +3423,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3658,6 +3700,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3750,8 +3897,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -3790,7 +3938,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -3862,8 +4010,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -3995,8 +4144,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -4041,8 +4191,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- @@ -4529,7 +4681,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4544,7 +4695,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4710,7 +4860,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4725,7 +4874,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4818,8 +4966,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -4888,7 +5036,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4903,7 +5050,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5069,7 +5215,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5084,7 +5229,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5294,7 +5438,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -5352,6 +5498,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -5649,7 +5832,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -6051,13 +6234,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -6185,6 +6367,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -6408,7 +6592,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -6419,7 +6602,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -6489,6 +6671,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -6520,8 +6704,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -6559,8 +6745,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -6579,8 +6767,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -6632,6 +6821,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -6741,8 +6932,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -7144,15 +7334,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -7208,6 +7396,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -7253,9 +7442,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -7271,6 +7460,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -7306,7 +7497,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -7330,12 +7521,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -7389,7 +7579,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -7414,7 +7604,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -7539,8 +7729,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -7556,8 +7747,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -7830,6 +8024,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -7922,8 +8221,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -7962,7 +8262,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -8034,8 +8334,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -8167,8 +8468,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -8213,8 +8515,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml b/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml index ed6181c82..34cb1f14b 100644 --- a/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml +++ b/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml @@ -344,7 +344,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -359,7 +358,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -525,7 +523,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -540,7 +537,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -633,8 +629,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -703,7 +699,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -718,7 +713,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -884,7 +878,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -899,7 +892,6 @@ spec: pod labels will be ignored. The default value is empty. 
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1264,7 +1256,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1322,6 +1316,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1614,7 +1645,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2016,13 +2047,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -2150,6 +2180,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2373,7 +2405,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2384,7 +2415,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2454,6 +2484,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2485,8 +2517,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2524,8 +2558,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -2544,8 +2580,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2597,6 +2634,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2706,8 +2745,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3109,15 +3147,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3173,6 +3209,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3218,9 +3255,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -3236,6 +3273,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3271,7 +3310,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3295,12 +3334,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3354,7 +3392,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3379,7 +3417,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3504,8 +3542,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3521,8 +3560,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3795,6 +3837,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3887,8 +4034,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -3927,7 +4075,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -3999,8 +4147,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -4132,8 +4281,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -4178,8 +4328,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_licensemasters.yaml b/config/crd/bases/enterprise.splunk.com_licensemasters.yaml index 85702267d..419e700ef 100644 --- a/config/crd/bases/enterprise.splunk.com_licensemasters.yaml +++ b/config/crd/bases/enterprise.splunk.com_licensemasters.yaml @@ -339,7 +339,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -354,7 +353,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -520,7 +518,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -535,7 +532,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -628,8 +624,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -698,7 +694,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -713,7 +708,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -879,7 +873,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -894,7 +887,6 @@ spec: pod labels will be ignored. The default value is empty. 
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1259,7 +1251,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1317,6 +1311,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1609,7 +1640,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2011,13 +2042,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -2145,6 +2175,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2368,7 +2400,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2379,7 +2410,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2449,6 +2479,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2480,8 +2512,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2519,8 +2553,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -2539,8 +2575,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2592,6 +2629,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2701,8 +2740,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3104,15 +3142,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3168,6 +3204,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3213,9 +3250,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -3231,6 +3268,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3266,7 +3305,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3290,12 +3329,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3349,7 +3387,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3374,7 +3412,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3499,8 +3537,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3516,8 +3555,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3790,6 +3832,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3882,8 +4029,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -3922,7 +4070,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -3994,8 +4142,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -4127,8 +4276,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -4173,8 +4323,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml b/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml index 2700bb371..2dcb4e2c4 100644 --- a/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml +++ b/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml @@ -346,7 +346,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -361,7 +360,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -527,7 +525,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -542,7 +539,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -635,8 +631,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -705,7 +701,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -720,7 +715,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -886,7 +880,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -901,7 +894,6 @@ spec: pod labels will be ignored. The default value is empty. 
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1266,7 +1258,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1324,6 +1318,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1616,7 +1647,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2018,13 +2049,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -2152,6 +2182,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2375,7 +2407,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2386,7 +2417,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2456,6 +2486,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2487,8 +2519,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2526,8 +2560,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -2546,8 +2582,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2599,6 +2636,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2708,8 +2747,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3111,15 +3149,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3175,6 +3211,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3220,9 +3257,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -3238,6 +3275,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3273,7 +3312,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3297,12 +3336,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3356,7 +3394,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3381,7 +3419,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3506,8 +3544,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3523,8 +3562,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3797,6 +3839,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3889,8 +4036,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -3929,7 +4077,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -4001,8 +4149,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -4134,8 +4283,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -4180,8 +4330,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- @@ -4863,7 +5015,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4878,7 +5029,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5044,7 +5194,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5059,7 +5208,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5152,8 +5300,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -5222,7 +5370,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5237,7 +5384,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5403,7 +5549,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5418,7 +5563,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5783,7 +5927,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -5841,6 +5987,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -6133,7 +6316,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -6535,13 +6718,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -6669,6 +6851,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -6892,7 +7076,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -6903,7 +7086,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -6973,6 +7155,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -7004,8 +7188,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -7043,8 +7229,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -7063,8 +7251,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -7116,6 +7305,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -7225,8 +7416,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -7628,15 +7818,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -7692,6 +7880,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -7737,9 +7926,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -7755,6 +7944,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -7790,7 +7981,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -7814,12 +8005,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -7873,7 +8063,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -7898,7 +8088,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -8023,8 +8213,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -8040,8 +8231,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -8314,6 +8508,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8406,8 +8705,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -8446,7 +8746,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -8518,8 +8818,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -8651,8 +8952,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -8697,8 +8999,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_postgresclusterclasses.yaml b/config/crd/bases/enterprise.splunk.com_postgresclusterclasses.yaml new file mode 100644 index 000000000..b6c333bd0 --- /dev/null +++ b/config/crd/bases/enterprise.splunk.com_postgresclusterclasses.yaml @@ -0,0 +1,324 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: postgresclusterclasses.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: PostgresClusterClass + listKind: PostgresClusterClassList + plural: postgresclusterclasses + singular: postgresclusterclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.provisioner + name: Provisioner + type: string + - jsonPath: .spec.postgresClusterConfig.instances + name: Instances + type: integer + - jsonPath: .spec.postgresClusterConfig.storage + name: Storage + type: string + - jsonPath: .spec.postgresClusterConfig.postgresVersion + name: Version + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v4 + schema: + openAPIV3Schema: + description: |- + PostgresClusterClass is the Schema for the postgresclusterclasses API. + PostgresClusterClass defines a reusable template and policy for postgres cluster provisioning. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + PostgresClusterClassSpec defines the desired state of PostgresClusterClass. + PostgresClusterClass is immutable after creation - it serves as a template for Cluster CRs. + properties: + cnpg: + description: |- + CNPG contains CloudNativePG-specific configuration and policies. + Only used when Provisioner is "postgresql.cnpg.io" + These settings CANNOT be overridden in PostgresCluster CR (platform policy). + properties: + connectionPooler: + description: |- + ConnectionPooler contains PgBouncer connection pooler configuration. + When enabled, creates RW and RO pooler deployments for clusters using this class. + properties: + config: + additionalProperties: + type: string + description: |- + Config contains PgBouncer configuration parameters. + Passed directly to CNPG Pooler spec.pgbouncer.parameters. + See: https://cloudnative-pg.io/docs/1.28/connection_pooling/#pgbouncer-configuration-options + type: object + instances: + default: 3 + description: |- + Instances is the number of PgBouncer pod replicas. + Higher values provide better availability and load distribution. + format: int32 + maximum: 10 + minimum: 1 + type: integer + mode: + default: transaction + description: Mode defines the connection pooling strategy. + enum: + - session + - transaction + - statement + type: string + type: object + primaryUpdateMethod: + default: switchover + description: |- + PrimaryUpdateMethod determines how the primary instance is updated. 
+ "restart" - tolerate brief downtime (suitable for development) + "switchover" - minimal downtime via automated failover (production-grade) + + NOTE: When using "switchover", ensure clusterConfig.instances > 1. + Switchover requires at least one replica to fail over to. + enum: + - restart + - switchover + type: string + type: object + config: + default: {} + description: |- + PostgresClusterConfig contains cluster-level configuration. + These settings apply to PostgresCluster infrastructure. + Can be overridden in PostgresCluster CR. + properties: + connectionPoolerEnabled: + default: false + description: |- + ConnectionPoolerEnabled controls whether PgBouncer connection pooling is deployed. + When true, creates RW and RO pooler deployments for clusters using this class. + Can be overridden in PostgresCluster CR. + type: boolean + instances: + default: 1 + description: |- + Instances is the number of database instances (1 primary + N replicas). + Single instance (1) is suitable for development. + High availability requires at least 3 instances (1 primary + 2 replicas). + format: int32 + maximum: 10 + minimum: 1 + type: integer + pgHBA: + description: |- + PgHBA contains pg_hba.conf host-based authentication rules. + Defines client authentication and connection security (cluster-wide). + Example: ["hostssl all all 0.0.0.0/0 scram-sha-256"] + items: + type: string + type: array + postgresVersion: + default: "18" + description: |- + PostgresVersion is the PostgreSQL version (major or major.minor). + Examples: "18" (latest 18.x), "18.1" (specific minor), "17", "16" + pattern: ^[0-9]+(\.[0-9]+)?$ + type: string + postgresqlConfig: + additionalProperties: + type: string + description: |- + PostgreSQLConfig contains PostgreSQL engine configuration parameters. + Maps to postgresql.conf settings (cluster-wide). 
+ Example: {"max_connections": "200", "shared_buffers": "2GB"} + type: object + resources: + description: |- + Resources defines CPU and memory requests/limits per instance. + All instances in the cluster have the same resources. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + storage: + anyOf: + - type: integer + - type: string + default: 50Gi + description: |- + Storage is the size of persistent volume for each instance. + Cannot be decreased after cluster creation (PostgreSQL limitation). + Recommended minimum: 10Gi for production viability. + Example: "50Gi", "100Gi", "1Ti" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + provisioner: + description: |- + Provisioner identifies which database provisioner to use. + Currently supported: "postgresql.cnpg.io" (CloudNativePG) + enum: + - postgresql.cnpg.io + type: string + required: + - provisioner + type: object + x-kubernetes-validations: + - message: cnpg.connectionPooler must be set when config.connectionPoolerEnabled + is true + rule: '!has(self.config) || !has(self.config.connectionPoolerEnabled) + || !self.config.connectionPoolerEnabled || (has(self.cnpg) && has(self.cnpg.connectionPooler))' + status: + description: PostgresClusterClassStatus defines the observed state of + PostgresClusterClass. + properties: + conditions: + description: Conditions represent the latest available observations + of the PostgresClusterClass state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + phase: + description: |- + Phase represents the current phase of the PostgresClusterClass. 
+ Valid phases: "Ready", "Invalid" + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/enterprise.splunk.com_postgresclusters.yaml b/config/crd/bases/enterprise.splunk.com_postgresclusters.yaml new file mode 100644 index 000000000..ae0b2aa2c --- /dev/null +++ b/config/crd/bases/enterprise.splunk.com_postgresclusters.yaml @@ -0,0 +1,455 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: postgresclusters.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: PostgresCluster + listKind: PostgresClusterList + plural: postgresclusters + singular: postgrescluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.class + name: Class + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v4 + schema: + openAPIV3Schema: + description: PostgresCluster is the Schema for the postgresclusters API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + PostgresClusterSpec defines the desired state of PostgresCluster. 
+ Validation rules ensure immutability of Class, and that Storage and PostgresVersion can only be set once and cannot be removed or downgraded. + properties: + class: + description: This field is IMMUTABLE after creation. + minLength: 1 + type: string + x-kubernetes-validations: + - message: class is immutable + rule: self == oldSelf + clusterDeletionPolicy: + default: Retain + description: ClusterDeletionPolicy controls the deletion behavior + of the underlying CNPG Cluster when the PostgresCluster is deleted. + enum: + - Delete + - Retain + type: string + connectionPoolerConfig: + description: Only takes effect when connection pooling is enabled. + properties: + config: + additionalProperties: + type: string + description: |- + Config contains PgBouncer configuration parameters. + Passed directly to CNPG Pooler spec.pgbouncer.parameters. + See: https://cloudnative-pg.io/docs/1.28/connection_pooling/#pgbouncer-configuration-options + type: object + instances: + default: 3 + description: |- + Instances is the number of PgBouncer pod replicas. + Higher values provide better availability and load distribution. + format: int32 + maximum: 10 + minimum: 1 + type: integer + mode: + default: transaction + description: Mode defines the connection pooling strategy. + enum: + - session + - transaction + - statement + type: string + type: object + connectionPoolerEnabled: + default: false + description: |- + ConnectionPoolerEnabled controls whether PgBouncer connection pooling is deployed for this cluster. + When set, takes precedence over the class-level connectionPoolerEnabled value. + type: boolean + instances: + description: Instances overrides the number of PostgreSQL instances + from ClusterClass. + format: int32 + maximum: 10 + minimum: 1 + type: integer + managedRoles: + description: |- + ManagedRoles contains PostgreSQL roles that should be created in the cluster. 
+ This field supports Server-Side Apply with per-role granularity, allowing + multiple PostgresDatabase controllers to manage different roles independently. + items: + description: ManagedRole represents a PostgreSQL role to be created + and managed in the cluster. + properties: + ensure: + default: present + description: Ensure controls whether the role should exist (present) + or not (absent). + enum: + - present + - absent + type: string + name: + description: Name of the role/user to create. + maxLength: 63 + minLength: 1 + type: string + passwordSecretRef: + description: |- + PasswordSecretRef references a Secret containing the password for this role. + The Secret should have a key "password" with the password value. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + pgHBA: + default: [] + description: |- + PgHBA contains pg_hba.conf host-based authentication rules. + Defines client authentication and connection security (cluster-wide). + Maps to pg_hba.conf settings. + Default empty array prevents panic. + Example: ["hostssl all all 0.0.0.0/0 scram-sha-256"] + items: + type: string + type: array + postgresVersion: + description: |- + PostgresVersion is the PostgreSQL version (major or major.minor). + Examples: "18" (latest 18.x), "18.1" (specific minor), "17", "16" + pattern: ^[0-9]+(\.[0-9]+)?$ + type: string + postgresqlConfig: + additionalProperties: + type: string + default: {} + description: |- + PostgreSQL overrides PostgreSQL engine parameters from ClusterClass. 
+ Maps to postgresql.conf settings. + Default empty map prevents panic. + Example: {"shared_buffers": "128MB", "log_min_duration_statement": "500ms"} + type: object + resources: + description: Resources overrides CPU/memory resources from ClusterClass. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + storage: + anyOf: + - type: integer + - type: string + description: |- + Storage overrides the storage size from ClusterClass. + Example: "5Gi" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - class + type: object + x-kubernetes-validations: + - messageExpression: '!has(self.postgresVersion) ? ''postgresVersion cannot + be removed once set (was: '' + oldSelf.postgresVersion + '')'' : ''postgresVersion + major version cannot be downgraded (from: '' + oldSelf.postgresVersion + + '', to: '' + self.postgresVersion + '')''' + rule: '!has(oldSelf.postgresVersion) || (has(self.postgresVersion) && + int(self.postgresVersion.split(''.'')[0]) >= int(oldSelf.postgresVersion.split(''.'')[0]))' + - messageExpression: '!has(self.storage) ? ''storage cannot be removed + once set (was: '' + string(oldSelf.storage) + '')'' : ''storage size + cannot be decreased (from: '' + string(oldSelf.storage) + '', to: + '' + string(self.storage) + '')''' + rule: '!has(oldSelf.storage) || (has(self.storage) && quantity(self.storage).compareTo(quantity(oldSelf.storage)) + >= 0)' + - message: connectionPoolerConfig cannot be overridden on PostgresCluster + rule: '!has(self.connectionPoolerConfig)' + status: + description: PostgresClusterStatus defines the observed state of PostgresCluster. + properties: + conditions: + description: Conditions represent the latest available observations + of the PostgresCluster's state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + connectionPoolerStatus: + description: |- + ConnectionPoolerStatus contains the observed state of the connection pooler. 
+ Only populated when connection pooler is enabled in the PostgresClusterClass. + properties: + enabled: + description: Enabled indicates whether pooler is active for this + cluster. + type: boolean + type: object + managedRolesStatus: + description: ManagedRolesStatus tracks the reconciliation status of + managed roles. + properties: + failed: + additionalProperties: + type: string + description: Failed contains roles that failed to reconcile with + error messages. + type: object + pending: + description: Pending contains roles that are being created but + not yet ready. + items: + type: string + type: array + reconciled: + description: Reconciled contains roles that have been successfully + created and are ready. + items: + type: string + type: array + type: object + phase: + description: |- + Phase represents the current phase of the PostgresCluster. + Values: "Pending", "Provisioning", "Failed", "Ready", "Deleting" + type: string + provisionerRef: + description: |- + ProvisionerRef contains reference to the provisioner resource managing this PostgresCluster. + Right now, only CNPG is supported. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + resources: + description: Resources contains references to related Kubernetes resources + like ConfigMaps and Secrets. + properties: + configMapRef: + description: |- + ConfigMapRef references the ConfigMap with connection endpoints. + Contains: CLUSTER_ENDPOINTS, POOLER_ENDPOINTS (if connection pooler enabled) + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + secretRef: + description: |- + SecretRef references the Secret with superuser credentials. + Contains: passwords for superuser + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/enterprise.splunk.com_postgresdatabases.yaml b/config/crd/bases/enterprise.splunk.com_postgresdatabases.yaml new file mode 100644 index 000000000..8de8462c3 --- /dev/null +++ b/config/crd/bases/enterprise.splunk.com_postgresdatabases.yaml @@ -0,0 +1,244 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: postgresdatabases.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: PostgresDatabase + listKind: PostgresDatabaseList + plural: postgresdatabases + singular: postgresdatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.clusterRef.name + name: Cluster + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v4 + schema: + openAPIV3Schema: + description: PostgresDatabase is the Schema for the postgresdatabases API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PostgresDatabaseSpec defines the desired state of PostgresDatabase. + properties: + clusterRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + databases: + items: + properties: + deletionPolicy: + default: Delete + enum: + - Delete + - Retain + type: string + extensions: + items: + type: string + type: array + name: + maxLength: 30 + type: string + required: + - name + type: object + maxItems: 10 + minItems: 1 + type: array + x-kubernetes-validations: + - message: database names must be unique + rule: self.all(x, self.filter(y, y.name == x.name).size() == 1) + required: + - clusterRef + - databases + type: object + x-kubernetes-validations: + - message: clusterRef is immutable + rule: self.clusterRef == oldSelf.clusterRef + status: + description: PostgresDatabaseStatus defines the observed state of PostgresDatabase. + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + databases: + items: + properties: + adminUserSecretRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + configMap: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + databaseRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + name: + type: string + ready: + type: boolean + rwUserSecretRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + type: object + type: array + observedGeneration: + description: ObservedGeneration represents the .metadata.generation + that the status was set based upon. + format: int64 + type: integer + phase: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml index 122f4d3bc..f717bb544 100644 --- a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml @@ -352,7 +352,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -367,7 +366,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -533,7 +531,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -548,7 +545,6 @@ spec: pod labels will be ignored. The default value is empty. 
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -641,8 +637,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -711,7 +707,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -726,7 +721,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -892,7 +886,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -907,7 +900,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1272,7 +1264,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1330,6 +1324,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. 
+ type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1627,7 +1658,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2029,13 +2060,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -2163,6 +2193,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2386,7 +2418,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. 
All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2397,7 +2428,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2467,6 +2497,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2498,8 +2530,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2537,8 +2571,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -2557,8 +2593,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2610,6 +2647,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2719,8 +2758,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3122,15 +3160,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3186,6 +3222,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3231,9 +3268,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -3249,6 +3286,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3284,7 +3323,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3308,12 +3347,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3367,7 +3405,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3392,7 +3430,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3517,8 +3555,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3534,8 +3573,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3808,6 +3850,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3900,8 +4047,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -3940,7 +4088,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -4012,8 +4160,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -4145,8 +4294,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -4191,8 +4341,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- @@ -4956,7 +5108,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4971,7 +5122,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5137,7 +5287,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5152,7 +5301,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5245,8 +5393,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -5315,7 +5463,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5330,7 +5477,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5496,7 +5642,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5511,7 +5656,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -6056,7 +6200,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -6133,7 +6277,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -6191,6 +6337,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -6488,7 +6671,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -6890,13 +7073,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -7024,6 +7206,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -7247,7 +7431,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -7258,7 +7441,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -7328,6 +7510,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -7359,8 +7543,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -7398,8 +7584,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -7418,8 +7606,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -7471,6 +7660,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -7580,8 +7771,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -7983,15 +8173,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -8047,6 +8235,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -8092,9 +8281,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -8110,6 +8299,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -8145,7 +8336,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -8169,12 +8360,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -8228,7 +8418,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -8253,7 +8443,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -8378,8 +8568,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -8395,8 +8586,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -8669,6 +8863,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8761,8 +9060,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -8801,7 +9101,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -8873,8 +9173,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -9006,8 +9307,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -9052,8 +9354,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_standalones.yaml b/config/crd/bases/enterprise.splunk.com_standalones.yaml index 387a408f0..a6e7d4b14 100644 --- a/config/crd/bases/enterprise.splunk.com_standalones.yaml +++ b/config/crd/bases/enterprise.splunk.com_standalones.yaml @@ -347,7 +347,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -362,7 +361,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -528,7 +526,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -543,7 +540,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -636,8 +632,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -706,7 +702,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -721,7 +716,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -887,7 +881,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -902,7 +895,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1267,7 +1259,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1325,6 +1319,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1621,7 +1652,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2023,13 +2054,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -2157,6 +2187,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2497,7 +2529,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2508,7 +2539,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2578,6 +2608,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2609,8 +2641,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2648,8 +2682,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -2668,8 +2704,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2721,6 +2758,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2830,8 +2869,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3233,15 +3271,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3297,6 +3333,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3342,9 +3379,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -3360,6 +3397,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3395,7 +3434,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3419,12 +3458,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3478,7 +3516,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3503,7 +3541,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3628,8 +3666,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3645,8 +3684,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3919,6 +3961,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4011,8 +4158,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -4051,7 +4199,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -4123,8 +4271,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -4256,8 +4405,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -4302,8 +4452,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- @@ -5108,7 +5260,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5123,7 +5274,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5289,7 +5439,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5304,7 +5453,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5397,8 +5545,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -5467,7 +5615,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5482,7 +5629,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5648,7 +5794,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5663,7 +5808,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -6028,7 +6172,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -6086,6 +6232,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -6382,7 +6565,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -6784,13 +6967,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -6918,6 +7100,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -7258,7 +7442,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -7269,7 +7452,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -7339,6 +7521,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -7370,8 +7554,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -7409,8 +7595,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -7429,8 +7617,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -7482,6 +7671,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -7591,8 +7782,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -7994,15 +8184,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -8058,6 +8246,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -8103,9 +8292,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -8121,6 +8310,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -8156,7 +8347,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -8180,12 +8371,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -8239,7 +8429,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -8264,7 +8454,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -8389,8 +8579,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -8406,8 +8597,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -8680,6 +8874,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8772,8 +9071,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -8812,7 +9112,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -8884,8 +9184,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -9017,8 +9318,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -9063,8 +9365,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index dd0d870ec..b527807c6 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -10,6 +10,9 @@ resources: - bases/enterprise.splunk.com_monitoringconsoles.yaml - bases/enterprise.splunk.com_searchheadclusters.yaml - bases/enterprise.splunk.com_standalones.yaml +- bases/enterprise.splunk.com_postgresdatabases.yaml +- bases/enterprise.splunk.com_postgresclusterclasses.yaml +- bases/enterprise.splunk.com_postgresclusters.yaml #+kubebuilder:scaffold:crdkustomizeresource diff --git a/config/manager/controller_manager_telemetry.yaml b/config/manager/controller_manager_telemetry.yaml new file mode 100644 index 000000000..2ccc8d264 --- /dev/null +++ b/config/manager/controller_manager_telemetry.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: manager-telemetry +data: + status: | + { + "lastTransmission": "", + "test": "false", + "sokVersion": "3.1.0" + } diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 47f07b0e6..e7bb8fcdb 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -1,5 +1,6 @@ resources: - manager.yaml +- controller_manager_telemetry.yaml generatorOptions: disableNameSuffixHash: true @@ -17,4 +18,4 @@ kind: Kustomization images: - name: controller newName: docker.io/splunk/splunk-operator - newTag: 3.0.0 + newTag: 3.1.0 diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 3974d02f0..f03f5ec9e 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -52,6 +52,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + ports: [] securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true @@ -85,6 +86,7 @@ spec: volumeMounts: - mountPath: 
/opt/splunk/appframework/ name: app-staging + # Additional volumeMounts will be added by patches for webhook and metrics certs serviceAccountName: controller-manager volumes: - configMap: diff --git a/config/manifests/bases/splunk-operator.clusterserviceversion.yaml b/config/manifests/bases/splunk-operator.clusterserviceversion.yaml index 92ee35e78..ad90c9cdb 100644 --- a/config/manifests/bases/splunk-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/splunk-operator.clusterserviceversion.yaml @@ -28,6 +28,16 @@ spec: kind: ClusterMaster name: clustermasters.enterprise.splunk.com version: v3 + - description: DatabaseClass is the Schema for the databaseclasses API. + displayName: Database Class + kind: DatabaseClass + name: databaseclasses.enterprise.splunk.com + version: v4 + - description: Database is the Schema for the databases API. + displayName: Database + kind: Database + name: databases.enterprise.splunk.com + version: v4 - description: IndexerCluster is the Schema for a Splunk Enterprise indexer cluster displayName: Indexer Cluster kind: IndexerCluster diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 3577dde2a..4ed7851d2 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -17,4 +17,18 @@ resources: # More info: https://book.kubebuilder.io/reference/metrics.html - metrics_auth_role.yaml - metrics_auth_role_binding.yaml -- metrics_reader_role.yaml \ No newline at end of file +- metrics_reader_role.yaml +# For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by +# default, aiding admins in cluster management. Those roles are +# not used by the splunk-operator itself. You can comment the following lines +# if you do not want those helpers be installed with your Project. 
+- postgrescluster_admin_role.yaml +- postgrescluster_editor_role.yaml +- postgrescluster_viewer_role.yaml +- postgresclusterclass_admin_role.yaml +- postgresclusterclass_editor_role.yaml +- postgresclusterclass_viewer_role.yaml +- postgresdatabase_admin_role.yaml +- postgresdatabase_editor_role.yaml +- postgresdatabase_viewer_role.yaml + diff --git a/config/rbac/postgrescluster_admin_role.yaml b/config/rbac/postgrescluster_admin_role.yaml new file mode 100644 index 000000000..bb3f2e06b --- /dev/null +++ b/config/rbac/postgrescluster_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over enterprise.splunk.com. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgrescluster-admin-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusters + verbs: + - '*' +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusters/status + verbs: + - get diff --git a/config/rbac/postgrescluster_editor_role.yaml b/config/rbac/postgrescluster_editor_role.yaml new file mode 100644 index 000000000..13884ce4b --- /dev/null +++ b/config/rbac/postgrescluster_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgrescluster-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusters/status + verbs: + - get diff --git a/config/rbac/postgrescluster_viewer_role.yaml b/config/rbac/postgrescluster_viewer_role.yaml new file mode 100644 index 000000000..0474151b3 --- /dev/null +++ b/config/rbac/postgrescluster_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgrescluster-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusters + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusters/status + verbs: + - get diff --git a/config/rbac/postgresclusterclass_admin_role.yaml b/config/rbac/postgresclusterclass_admin_role.yaml new file mode 100644 index 000000000..d16defdd6 --- /dev/null +++ b/config/rbac/postgresclusterclass_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over enterprise.splunk.com. 
+# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgresclusterclass-admin-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses + verbs: + - '*' +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses/status + verbs: + - get diff --git a/config/rbac/postgresclusterclass_editor_role.yaml b/config/rbac/postgresclusterclass_editor_role.yaml new file mode 100644 index 000000000..a634510ff --- /dev/null +++ b/config/rbac/postgresclusterclass_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgresclusterclass-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses/status + verbs: + - get diff --git a/config/rbac/postgresclusterclass_viewer_role.yaml b/config/rbac/postgresclusterclass_viewer_role.yaml new file mode 100644 index 000000000..4da318ff2 --- /dev/null +++ b/config/rbac/postgresclusterclass_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgresclusterclass-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses/status + verbs: + - get diff --git a/config/rbac/postgresdatabase_admin_role.yaml b/config/rbac/postgresdatabase_admin_role.yaml new file mode 100644 index 000000000..b98548d5c --- /dev/null +++ b/config/rbac/postgresdatabase_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. 
+# +# Grants full permissions ('*') over enterprise.splunk.com. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgresdatabase-admin-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresdatabases + verbs: + - '*' +- apiGroups: + - enterprise.splunk.com + resources: + - postgresdatabases/status + verbs: + - get diff --git a/config/rbac/postgresdatabase_editor_role.yaml b/config/rbac/postgresdatabase_editor_role.yaml new file mode 100644 index 000000000..21891af10 --- /dev/null +++ b/config/rbac/postgresdatabase_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgresdatabase-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresdatabases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - postgresdatabases/status + verbs: + - get diff --git a/config/rbac/postgresdatabase_viewer_role.yaml b/config/rbac/postgresdatabase_viewer_role.yaml new file mode 100644 index 000000000..702fab391 --- /dev/null +++ b/config/rbac/postgresdatabase_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgresdatabase-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresdatabases + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - postgresdatabases/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 1bbc2427e..e7f4b73e1 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -53,6 +53,8 @@ rules: - licensemanagers - licensemasters - monitoringconsoles + - postgresclusters + - postgresdatabases - searchheadclusters - standalones verbs: @@ -72,6 +74,8 @@ rules: - licensemanagers/finalizers - licensemasters/finalizers - monitoringconsoles/finalizers + - postgresclusters/finalizers + - postgresdatabases/finalizers - searchheadclusters/finalizers - standalones/finalizers verbs: @@ -85,9 +89,40 @@ rules: - licensemanagers/status - licensemasters/status - monitoringconsoles/status + - postgresclusters/status + - postgresdatabases/status - searchheadclusters/status - standalones/status verbs: - get - patch - update +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters + - databases + - poolers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get diff --git a/config/samples/enterprise_v4_postgrescluster_default.yaml b/config/samples/enterprise_v4_postgrescluster_default.yaml new file mode 100644 index 000000000..6669aceb2 --- /dev/null +++ b/config/samples/enterprise_v4_postgrescluster_default.yaml @@ -0,0 +1,12 @@ +# This is a sample PostgresCluster manifest with default values for all fields. 
+# Defaults are inherited from the ClusterClass "postgresql-dev" (see enterprise_v4_clusterclass_dev.yaml) and can be overridden here. +apiVersion: enterprise.splunk.com/v4 +kind: PostgresCluster +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgresql-cluster-dev +spec: + class: postgresql-dev + diff --git a/config/samples/enterprise_v4_postgrescluster_dev.yaml b/config/samples/enterprise_v4_postgrescluster_dev.yaml new file mode 100644 index 000000000..b5c6b8700 --- /dev/null +++ b/config/samples/enterprise_v4_postgrescluster_dev.yaml @@ -0,0 +1,28 @@ +# Sample PostgresCluster using Postgres-dev ClusterClass with overriding defaults +# This sample demonstrates how to override default values from the ClusterClass "postgresql-dev" (see enterprise_v4_clusterclass_dev.yaml) in a PostgresCluster manifest. +# Overrides include changing storage, changing PostgreSQL version, and modifying resources. +apiVersion: enterprise.splunk.com/v4 +kind: PostgresCluster +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgresql-cluster-dev +spec: + # Reference the ClusterClass to inherit defaults - this is required, immutable, and must match the name of an existing ClusterClass + class: postgresql-dev + clusterDeletionPolicy: Retain + instances: 3 + # Storage and PostgreSQL version are overridden from the ClusterClass defaults. Validation rules on the PostgresCluster resource will prevent removing these fields or setting them to lower values than the original overrides. 
+ storage: 1Gi + postgresVersion: "15.10" + resources: + requests: + cpu: "250m" + memory: "512Mi" + limits: + cpu: "500m" + memory: "1Gi" + # Enable connection pooler for this cluster + # Takes precedence over the class-level connectionPoolerEnabled value + connectionPoolerEnabled: true \ No newline at end of file diff --git a/config/samples/enterprise_v4_postgresclusterclass_dev.yaml b/config/samples/enterprise_v4_postgresclusterclass_dev.yaml new file mode 100644 index 000000000..a9846e36c --- /dev/null +++ b/config/samples/enterprise_v4_postgresclusterclass_dev.yaml @@ -0,0 +1,39 @@ +--- +# Development PostgresClusterClass +# Minimal configuration for local development and testing +apiVersion: enterprise.splunk.com/v4 +kind: PostgresClusterClass +metadata: + name: postgresql-dev +spec: + provisioner: postgresql.cnpg.io + + config: + # Single instance - no HA (suitable for development) + instances: 1 + + # Small storage for development + storage: 10Gi + + # Latest PostgreSQL 18 + postgresVersion: "18" + + # Minimal resources + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "1" + memory: "2Gi" + connectionPoolerEnabled: true + + cnpg: + # Restart method - tolerate downtime in dev + primaryUpdateMethod: restart + connectionPooler: + instances: 2 + mode: transaction + config: + max_client_conn: "100" + diff --git a/config/samples/enterprise_v4_postgresclusterclass_prod.yaml b/config/samples/enterprise_v4_postgresclusterclass_prod.yaml new file mode 100644 index 000000000..56d9f232a --- /dev/null +++ b/config/samples/enterprise_v4_postgresclusterclass_prod.yaml @@ -0,0 +1,80 @@ +--- +# Production PostgresClusterClass +# Full configuration with HA, security, and tuned PostgreSQL settings +apiVersion: enterprise.splunk.com/v4 +kind: PostgresClusterClass +metadata: + name: postgresql-prod +spec: + provisioner: postgresql.cnpg.io + + config: + # High availability - 1 primary + 2 replicas + instances: 3 + + # Production storage + storage: 100Gi + + 
# PostgreSQL 18.1 (specific minor version) + postgresVersion: "18.1" + + # Production-grade resources + resources: + requests: + cpu: "2" + memory: "8Gi" + limits: + cpu: "4" + memory: "16Gi" + + # Tuned PostgreSQL configuration for OLTP workload + postgresqlConfig: + # Connection settings + max_connections: "200" + + # Memory settings (based on 8GB RAM) + shared_buffers: "2GB" + effective_cache_size: "6GB" + maintenance_work_mem: "512MB" + work_mem: "20MB" + + # WAL settings + wal_buffers: "16MB" + min_wal_size: "1GB" + max_wal_size: "4GB" + + # Query tuning + random_page_cost: "1.1" # SSD optimization + effective_io_concurrency: "200" + + # Logging + log_destination: "stderr" + logging_collector: "on" + log_min_duration_statement: "1000" # Log queries > 1s + + # Secure pg_hba configuration + pgHBA: + # Reject all non-SSL connections + - "hostnossl all all 0.0.0.0/0 reject" + # Require SSL + password authentication + - "hostssl all all 0.0.0.0/0 scram-sha-256" + + # Enable connection pooler for clusters using this class + connectionPoolerEnabled: true + + cnpg: + # Switchover method - minimal downtime via automated failover + primaryUpdateMethod: switchover + + # Connection pooler configuration (PgBouncer) + connectionPooler: + # Number of PgBouncer pod replicas + instances: 3 + # Pooling mode + mode: transaction + # PgBouncer configuration parameters + config: + # Maximum number of client connections allowed + max_client_conn: "100" + # Default number of server connections per user/database pair + default_pool_size: "20" diff --git a/config/samples/enterprise_v4_postgresdatabase.yaml b/config/samples/enterprise_v4_postgresdatabase.yaml new file mode 100644 index 000000000..874393548 --- /dev/null +++ b/config/samples/enterprise_v4_postgresdatabase.yaml @@ -0,0 +1,18 @@ +apiVersion: enterprise.splunk.com/v4 +kind: PostgresDatabase +metadata: + name: splunk-databases + # namespace: default +spec: + clusterRef: + name: postgresql-cluster-dev + databases: + - name: 
kvstore + extensions: + - pg_stat_statements + - pgcrypto + deletionPolicy: Delete + - name: analytics + extensions: + - pg_trgm + deletionPolicy: Delete \ No newline at end of file diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 73c6d3649..4eb884742 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -13,4 +13,7 @@ resources: - enterprise_v4_searchheadcluster.yaml - enterprise_v4_clustermanager.yaml - enterprise_v4_licensemanager.yaml +- enterprise_v4_database.yaml +- enterprise_v4_databaseclass.yaml +- enterprise_v4_postgrescluster.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/validation-tests/database/01-invalid-duplicate-names.yaml b/config/samples/validation-tests/database/01-invalid-duplicate-names.yaml new file mode 100644 index 000000000..95cd4d19b --- /dev/null +++ b/config/samples/validation-tests/database/01-invalid-duplicate-names.yaml @@ -0,0 +1,14 @@ +apiVersion: enterprise.splunk.com/v4 +kind: PostgresDatabase +metadata: + name: test-duplicate-names + namespace: default +spec: + clusterRef: + name: postgres-cluster + databases: + - name: kvstore + extensions: + - pg_stat_statements + - name: analytics + - name: kvstore # DUPLICATE! Should fail with: "database names must be unique" diff --git a/config/samples/validation-tests/database/02-invalid-immutability-update.yaml b/config/samples/validation-tests/database/02-invalid-immutability-update.yaml new file mode 100644 index 000000000..73dfb300b --- /dev/null +++ b/config/samples/validation-tests/database/02-invalid-immutability-update.yaml @@ -0,0 +1,19 @@ +apiVersion: enterprise.splunk.com/v4 +kind: PostgresDatabase +metadata: + name: test-postgresdatabase + namespace: default +spec: + clusterRef: + name: different-cluster # CHANGED! 
Should fail with: "clusterRef is immutable" + databases: + - name: kvstore + extensions: + - pg_stat_statements + - pgcrypto + deletionPolicy: Retain + - name: analytics + extensions: + - pg_trgm + deletionPolicy: Delete + - name: metrics diff --git a/config/samples/validation-tests/database/03-invalid-deletion-policy.yaml b/config/samples/validation-tests/database/03-invalid-deletion-policy.yaml new file mode 100644 index 000000000..bb911e88c --- /dev/null +++ b/config/samples/validation-tests/database/03-invalid-deletion-policy.yaml @@ -0,0 +1,11 @@ +apiVersion: enterprise.splunk.com/v4 +kind: PostgresDatabase +metadata: + name: test-invalid-policy + namespace: default +spec: + clusterRef: + name: postgres-cluster + databases: + - name: kvstore + deletionPolicy: Archive # INVALID! Only "Delete" or "Retain" allowed diff --git a/config/samples/validation-tests/database/04-invalid-missing-fields.yaml b/config/samples/validation-tests/database/04-invalid-missing-fields.yaml new file mode 100644 index 000000000..c0376eb21 --- /dev/null +++ b/config/samples/validation-tests/database/04-invalid-missing-fields.yaml @@ -0,0 +1,11 @@ +apiVersion: enterprise.splunk.com/v4 +kind: PostgresDatabase +metadata: + name: test-missing-fields + namespace: default +spec: + # Missing clusterRef - REQUIRED field! + databases: + - name: kvstore + - extensions: # Missing name - REQUIRED in DatabaseDefinition! 
+ - pg_stat_statements diff --git a/go.mod b/go.mod index e1d9c42b5..4b227b9a5 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/splunk/splunk-operator go 1.25.5 require ( - cloud.google.com/go/storage v1.30.1 + cloud.google.com/go/storage v1.56.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1 @@ -12,36 +12,46 @@ require ( github.com/aws/aws-sdk-go-v2/credentials v1.17.71 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.85 github.com/aws/aws-sdk-go-v2/service/s3 v1.84.1 + github.com/cloudnative-pg/cloudnative-pg v1.28.0 github.com/go-logr/logr v1.4.3 github.com/google/go-cmp v0.7.0 github.com/google/uuid v1.6.0 + github.com/jackc/pgx/v5 v5.8.0 github.com/joho/godotenv v1.5.1 github.com/minio/minio-go/v7 v7.0.16 - github.com/onsi/ginkgo/v2 v2.27.3 - github.com/onsi/gomega v1.38.3 + github.com/onsi/ginkgo/v2 v2.28.1 + github.com/onsi/gomega v1.39.1 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.19.1 - github.com/stretchr/testify v1.9.0 + github.com/prometheus/client_golang v1.23.2 + github.com/sethvargo/go-password v0.3.1 + github.com/stretchr/testify v1.11.1 github.com/wk8/go-ordered-map/v2 v2.1.7 - go.uber.org/zap v1.26.0 - google.golang.org/api v0.126.0 - k8s.io/api v0.31.0 - k8s.io/apiextensions-apiserver v0.31.0 - k8s.io/apimachinery v0.31.0 - k8s.io/client-go v0.31.0 + go.uber.org/zap v1.27.1 + google.golang.org/api v0.263.0 + k8s.io/api v0.34.2 + k8s.io/apiextensions-apiserver v0.34.2 + k8s.io/apimachinery v0.34.2 + k8s.io/client-go v0.34.2 k8s.io/kubectl v0.26.2 - sigs.k8s.io/controller-runtime v0.19.0 + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 + sigs.k8s.io/controller-runtime v0.22.4 ) require ( - cloud.google.com/go v0.110.7 // indirect - cloud.google.com/go/compute/metadata v0.3.0 // indirect - cloud.google.com/go/iam v1.1.1 // indirect + cel.dev/expr v0.24.0 // indirect + 
cloud.google.com/go v0.121.6 // indirect + cloud.google.com/go/auth v0.18.1 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect + cloud.google.com/go/iam v1.5.3 // indirect + cloud.google.com/go/monitoring v1.24.3 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37 // indirect @@ -62,102 +72,125 @@ require ( github.com/buger/jsonparser v1.1.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudnative-pg/barman-cloud v0.3.3 // indirect + github.com/cloudnative-pg/cnpg-i v0.3.0 // indirect + github.com/cloudnative-pg/machinery v0.3.1 // indirect + github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.35.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + 
github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.4 // indirect + github.com/go-openapi/jsonpointer v0.22.0 // indirect + github.com/go-openapi/jsonreference v0.21.1 // indirect + github.com/go-openapi/swag v0.24.1 // indirect + github.com/go-openapi/swag/cmdutils v0.24.0 // indirect + github.com/go-openapi/swag/conv v0.24.0 // indirect + github.com/go-openapi/swag/fileutils v0.24.0 // indirect + github.com/go-openapi/swag/jsonname v0.24.0 // indirect + github.com/go-openapi/swag/jsonutils v0.24.0 // indirect + github.com/go-openapi/swag/loading v0.24.0 // indirect + github.com/go-openapi/swag/mangling v0.24.0 // indirect + github.com/go-openapi/swag/netutils v0.24.0 // indirect + github.com/go-openapi/swag/stringutils v0.24.0 // indirect + github.com/go-openapi/swag/typeutils v0.24.0 // indirect + github.com/go-openapi/swag/yamlutils v0.24.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/cel-go v0.20.1 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect - github.com/google/s2a-go v0.1.4 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 
// indirect - github.com/googleapis/gax-go/v2 v2.11.0 // indirect - github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/cel-go v0.26.0 // indirect + github.com/google/gnostic-models v0.7.0 // indirect + github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect + github.com/googleapis/gax-go/v2 v2.16.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.13.5 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/cpuid v1.3.1 // indirect + github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/minio/md5-simd v1.1.0 // indirect github.com/minio/sha256-simd v0.1.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/moby/spdystream v0.4.0 // indirect + github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.16.1 // indirect github.com/rs/xid v1.2.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/stoewer/go-strcase v1.2.0 // indirect + github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/pflag v1.0.10 // indirect + github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/x448/float16 v0.8.4 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect - go.opentelemetry.io/otel v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.28.0 // indirect - go.opentelemetry.io/otel/sdk v1.28.0 // indirect - go.opentelemetry.io/otel/trace v1.28.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect + 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.39.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/sdk v1.39.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.39.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect + go.opentelemetry.io/proto/otlp v1.7.0 // indirect go.uber.org/multierr v1.11.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.45.0 // indirect - golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect - golang.org/x/mod v0.29.0 // indirect - golang.org/x/net v0.47.0 // indirect - golang.org/x/oauth2 v0.27.0 // indirect - golang.org/x/sync v0.18.0 // indirect - golang.org/x/sys v0.38.0 // indirect - golang.org/x/term v0.37.0 // indirect - golang.org/x/text v0.31.0 // indirect - golang.org/x/time v0.6.0 // indirect - golang.org/x/tools v0.38.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + golang.org/x/crypto v0.47.0 // indirect + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/mod v0.32.0 // indirect + golang.org/x/net v0.49.0 // indirect + golang.org/x/oauth2 v0.34.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/term v0.39.0 // indirect + golang.org/x/text v0.33.0 // indirect + golang.org/x/time v0.14.0 // indirect + golang.org/x/tools v0.41.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/genproto/googleapis/rpc 
v0.0.0-20250218202821-56aae31c358a // indirect - google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.36.7 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d // indirect + google.golang.org/grpc v1.78.0 // indirect + google.golang.org/protobuf v1.36.11 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.4 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.31.0 // indirect - k8s.io/component-base v0.31.0 // indirect + k8s.io/apiserver v0.34.2 // indirect + k8s.io/component-base v0.34.2 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + k8s.io/kube-openapi v0.0.0-20250905212525-66792eed8611 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index b0a4c1cd2..b8bfa66e4 100644 --- a/go.sum +++ b/go.sum @@ -1,13 +1,25 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.110.7 
h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= -cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= -cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= -cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= -cloud.google.com/go/storage v1.30.1 h1:uOdMxAs8HExqBlnLtnQyP0YkvbiDpdGShGKtx6U/oNM= -cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= +cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= +cloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs= +cloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= +cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/logging v1.13.1 h1:O7LvmO0kGLaHY/gq8cV7T0dyp6zJhYAOtZPX4TF3QtY= +cloud.google.com/go/logging v1.13.1/go.mod h1:XAQkfkMBxQRjQek96WLPNze7vsOmay9H5PqfsNYDqvw= +cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E= 
+cloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY= +cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE= +cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI= +cloud.google.com/go/storage v1.56.0 h1:iixmq2Fse2tqxMbWhLWC9HfBj1qdxqAmiK8/eqtsLxI= +cloud.google.com/go/storage v1.56.0/go.mod h1:Tpuj6t4NweCLzlNbw9Z9iwxEkrSem20AetIeH/shgVU= +cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= +cloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 h1:JZg6HRh6W6U4OLl6lk7BZ7BLisIzM9dG1R50zUk9C/M= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0/go.mod h1:YL1xnZ6QejvQHWJrX/AvhFl4WW4rqHVoKspWNVwFk0M= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= @@ -20,16 +32,20 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1 h1:cf+OIKbkmMHBaC3u7 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.4.1/go.mod h1:ap1dmS6vQKJxSMNiGJcq4QuUQkOynyD93gLw6MDF7ek= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod 
h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo= github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go-v2 v1.36.6 h1:zJqGjVbRdTPojeCGWn5IR5pbJwSQSBh5RWFTQcEQGdU= github.com/aws/aws-sdk-go-v2 v1.36.6/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA= @@ -78,19 +94,19 @@ github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMU github.com/buger/jsonparser v1.1.1/go.mod 
h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cloudnative-pg/barman-cloud v0.3.3 h1:EEcjeV+IUivDpmyF/H/XGY1pGaKJ5LS5MYeB6wgGcak= +github.com/cloudnative-pg/barman-cloud v0.3.3/go.mod h1:5CM4MncAxAjnqxjDt0I5E/oVd7gsMLL0/o/wQ+vUSgs= +github.com/cloudnative-pg/cloudnative-pg v1.28.0 h1:vkv0a0ewDSfJOPJrsyUr4uczsxheReAWf/k171V0Dm0= +github.com/cloudnative-pg/cloudnative-pg v1.28.0/go.mod h1:209fkRR6m0vXUVQ9Q498eAPQqN2UlXECbXXtpGsZz3I= 
+github.com/cloudnative-pg/cnpg-i v0.3.0 h1:5ayNOG5x68lU70IVbHDZQrv5p+bErCJ0mqRmOpW2jjE= +github.com/cloudnative-pg/cnpg-i v0.3.0/go.mod h1:VOIWWXcJ1RyioK+elR2DGOa4cBA6K+6UQgx05aZmH+g= +github.com/cloudnative-pg/machinery v0.3.1 h1:KtPA6EwELTUNisCMLiFYkK83GU9606rkGQhDJGPB8Yw= +github.com/cloudnative-pg/machinery v0.3.1/go.mod h1:jebuqKxZAbrRKDEEpVCIDMKW+FbWtB9Kf/hb2kMUu9o= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0= +github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -98,31 +114,34 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod 
h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= -github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= +github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo= +github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 
v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs= github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo= github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M= github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk= github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE= github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 
h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -130,13 +149,34 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/jsonpointer v0.22.0 h1:TmMhghgNef9YXxTu1tOopo+0BGEytxA+okbry0HjZsM= +github.com/go-openapi/jsonpointer v0.22.0/go.mod h1:xt3jV88UtExdIkkL7NloURjRQjbeUgcxFblMjq2iaiU= +github.com/go-openapi/jsonreference v0.21.1 h1:bSKrcl8819zKiOgxkbVNRUBIr6Wwj9KYrDbMjRs0cDA= +github.com/go-openapi/jsonreference v0.21.1/go.mod h1:PWs8rO4xxTUqKGu+lEvvCxD5k2X7QYkKAepJyCmSTT8= +github.com/go-openapi/swag v0.24.1 h1:DPdYTZKo6AQCRqzwr/kGkxJzHhpKxZ9i/oX0zag+MF8= +github.com/go-openapi/swag v0.24.1/go.mod h1:sm8I3lCPlspsBBwUm1t5oZeWZS0s7m/A+Psg0ooRU0A= +github.com/go-openapi/swag/cmdutils v0.24.0 h1:KlRCffHwXFI6E5MV9n8o8zBRElpY4uK4yWyAMWETo9I= +github.com/go-openapi/swag/cmdutils v0.24.0/go.mod h1:uxib2FAeQMByyHomTlsP8h1TtPd54Msu2ZDU/H5Vuf8= +github.com/go-openapi/swag/conv v0.24.0 h1:ejB9+7yogkWly6pnruRX45D1/6J+ZxRu92YFivx54ik= 
+github.com/go-openapi/swag/conv v0.24.0/go.mod h1:jbn140mZd7EW2g8a8Y5bwm8/Wy1slLySQQ0ND6DPc2c= +github.com/go-openapi/swag/fileutils v0.24.0 h1:U9pCpqp4RUytnD689Ek/N1d2N/a//XCeqoH508H5oak= +github.com/go-openapi/swag/fileutils v0.24.0/go.mod h1:3SCrCSBHyP1/N+3oErQ1gP+OX1GV2QYFSnrTbzwli90= +github.com/go-openapi/swag/jsonname v0.24.0 h1:2wKS9bgRV/xB8c62Qg16w4AUiIrqqiniJFtZGi3dg5k= +github.com/go-openapi/swag/jsonname v0.24.0/go.mod h1:GXqrPzGJe611P7LG4QB9JKPtUZ7flE4DOVechNaDd7Q= +github.com/go-openapi/swag/jsonutils v0.24.0 h1:F1vE1q4pg1xtO3HTyJYRmEuJ4jmIp2iZ30bzW5XgZts= +github.com/go-openapi/swag/jsonutils v0.24.0/go.mod h1:vBowZtF5Z4DDApIoxcIVfR8v0l9oq5PpYRUuteVu6f0= +github.com/go-openapi/swag/loading v0.24.0 h1:ln/fWTwJp2Zkj5DdaX4JPiddFC5CHQpvaBKycOlceYc= +github.com/go-openapi/swag/loading v0.24.0/go.mod h1:gShCN4woKZYIxPxbfbyHgjXAhO61m88tmjy0lp/LkJk= +github.com/go-openapi/swag/mangling v0.24.0 h1:PGOQpViCOUroIeak/Uj/sjGAq9LADS3mOyjznmHy2pk= +github.com/go-openapi/swag/mangling v0.24.0/go.mod h1:Jm5Go9LHkycsz0wfoaBDkdc4CkpuSnIEf62brzyCbhc= +github.com/go-openapi/swag/netutils v0.24.0 h1:Bz02HRjYv8046Ycg/w80q3g9QCWeIqTvlyOjQPDjD8w= +github.com/go-openapi/swag/netutils v0.24.0/go.mod h1:WRgiHcYTnx+IqfMCtu0hy9oOaPR0HnPbmArSRN1SkZM= +github.com/go-openapi/swag/stringutils v0.24.0 h1:i4Z/Jawf9EvXOLUbT97O0HbPUja18VdBxeadyAqS1FM= +github.com/go-openapi/swag/stringutils v0.24.0/go.mod h1:5nUXB4xA0kw2df5PRipZDslPJgJut+NjL7D25zPZ/4w= +github.com/go-openapi/swag/typeutils v0.24.0 h1:d3szEGzGDf4L2y1gYOSSLeK6h46F+zibnEas2Jm/wIw= +github.com/go-openapi/swag/typeutils v0.24.0/go.mod h1:q8C3Kmk/vh2VhpCLaoR2MVWOGP8y7Jc8l82qCTd1DYI= +github.com/go-openapi/swag/yamlutils v0.24.0 h1:bhw4894A7Iw6ne+639hsBNRHg9iZg/ISrOVr+sJGp4c= +github.com/go-openapi/swag/yamlutils v0.24.0/go.mod h1:DpKv5aYuaGm/sULePoeiG8uwMpZSfReo1HR3Ik0yaG8= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod 
h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= @@ -145,68 +185,47 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod 
h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= -github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= +github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= 
+github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= -github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= -github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= -github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= -github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 h1:z2ogiKUYzX5Is6zr/vP9vJGqPwcdqsWjOt+V8J7+bTc= +github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83/go.mod h1:MxpfABSjhmINe3F1It9d+8exIHFvUqtLIRCdOGNXqiI= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= -github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= -github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= +github.com/googleapis/enterprise-certificate-proxy v0.3.11 h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao= +github.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8= +github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y= +github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3 
h1:5ZPtiqj0JL5oKWmcsq4VMaAW5ukBEgSGXEN89zeH1Jo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3/go.mod h1:ndYquD05frm2vACXE1nsccT4oJzjhw2arTS2cpUD1PI= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo= +github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -219,23 +238,27 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.5 h1:9O69jUPDcsT9fEm74W92rZL9FQY7rCdaXVneq+yyzl4= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= 
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0 h1:bMqrb3UHgHbP+PW9VwiejfDJU1R0PpXVZNMdeH8WYKI= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= 
github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= @@ -248,56 +271,64 @@ github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKU github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= -github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.27.3 h1:ICsZJ8JoYafeXFFlFAG75a7CxMsJHwgKwtO+82SE9L8= -github.com/onsi/ginkgo/v2 v2.27.3/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= -github.com/onsi/gomega v1.38.3 h1:eTX+W6dobAYfFeGC2PV6RwXRu/MyT+cQguijutvkpSM= -github.com/onsi/gomega v1.38.3/go.mod h1:ZCU1pkQcXDO5Sl9/VVEGlDyp+zm0m1cmeG5TOzLgdh4= +github.com/onsi/ginkgo/v2 v2.28.1 h1:S4hj+HbZp40fNKuLUQOYLDgZLwNUVn19N3Atb98NCyI= +github.com/onsi/ginkgo/v2 v2.28.1/go.mod h1:CLtbVInNckU3/+gC8LzkGUb9oF+e8W8TdUsxPwvdOgE= +github.com/onsi/gomega v1.39.1 h1:1IJLAad4zjPn2PsnhH70V4DKRFlrCzGBNrNaru+Vf28= +github.com/onsi/gomega v1.39.1/go.mod h1:hL6yVALoTOxeWudERyfppUcZXjMwIMLnuSfruD2lcfg= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.2 h1:VRXUgbGmpmjZgFYiUnTwlC+JjfCUs5KKFsorJhI1ZKQ= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.2/go.mod h1:nPk0OteXBkbT0CRCa2oZQL1jRLW6RJ2fuIijHypeJdk= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs 
v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sethvargo/go-password v0.3.1 h1:WqrLTjo7X6AcVYfC6R7GtSyuUQR9hGyAj/f1PYQZCJU= +github.com/sethvargo/go-password v0.3.1/go.mod h1:rXofC1zT54N7R8K/h1WDUdkf9BOx5OptoxrMBcrXzvs= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -306,13 +337,14 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod 
h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/thoas/go-funk v0.9.3 h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw= +github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -327,223 +359,160 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric 
v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= 
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0 h1:tgJ0uaNS4c98WRNUEx5U3aDlrDOI5Rs+1Vifcw4DJ8U= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0/go.mod h1:U7HYyW0zt/a9x5J1Kjs+r1f/d4ZHnYFclhYY2+YbeoE= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= +go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= +go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= 
+go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= -golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc h1:mCRnTeVUjcrhlRmO0VK8a6k6Rrf6TF9htwo2pJVSjIU= -golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/crypto v0.47.0 
h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= 
-golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= -golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= -golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= -golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= -golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= -golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= -golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= +golang.org/x/time v0.14.0 
h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= -golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= +golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors 
v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= -google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 h1:7whR9kGa5LUwFtpLm2ArCEejtnxlGeLbAyjFY8sGNFw= -google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf 
v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= -google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.263.0 h1:UFs7qn8gInIdtk1ZA6eXRXp5JDAnS4x9VRsRVCeKdbk= +google.golang.org/api v0.263.0/go.mod h1:fAU1xtNNisHgOF5JooAs8rRaTkl2rT3uaoNGo9NS3R8= +google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934= +google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d h1:xXzuihhT3gL/ntduUZwHECzAn57E8dA6l8SOtYWdD8Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260122232226-8e98ce8d340d/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= +google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= +google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= +google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= -k8s.io/api v0.31.0/go.mod 
h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= -k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= -k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= -k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc= -k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= -k8s.io/apiserver v0.31.0 h1:p+2dgJjy+bk+B1Csz+mc2wl5gHwvNkC9QJV+w55LVrY= -k8s.io/apiserver v0.31.0/go.mod h1:KI9ox5Yu902iBnnyMmy7ajonhKnkeZYJhTZ/YI+WEMk= -k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= -k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= -k8s.io/component-base v0.31.0 h1:/KIzGM5EvPNQcYgwq5NwoQBaOlVFrghoVGr8lG6vNRs= -k8s.io/component-base v0.31.0/go.mod h1:TYVuzI1QmN4L5ItVdMSXKvH7/DtvIuas5/mm8YT3rTo= +k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= +k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= +k8s.io/apiextensions-apiserver v0.34.2 h1:WStKftnGeoKP4AZRz/BaAAEJvYp4mlZGN0UCv+uvsqo= +k8s.io/apiextensions-apiserver v0.34.2/go.mod h1:398CJrsgXF1wytdaanynDpJ67zG4Xq7yj91GrmYN2SE= +k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= +k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.2 h1:2/yu8suwkmES7IzwlehAovo8dDE07cFRC7KMDb1+MAE= +k8s.io/apiserver v0.34.2/go.mod h1:gqJQy2yDOB50R3JUReHSFr+cwJnL8G1dzTA0YLEqAPI= +k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= +k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= +k8s.io/component-base v0.34.2 h1:HQRqK9x2sSAsd8+R4xxRirlTjowsg6fWCPwWYeSvogQ= +k8s.io/component-base v0.34.2/go.mod h1:9xw2FHJavUHBFpiGkZoKuYZ5pdtLKe97DEByaA+hHbM= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= 
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kube-openapi v0.0.0-20250905212525-66792eed8611 h1:o4oKOsvSymDkZRsMAPZU7bRdwL+lPOK5VS10Dr1D6eg= +k8s.io/kube-openapi v0.0.0-20250905212525-66792eed8611/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= k8s.io/kubectl v0.26.2 h1:SMPB4j48eVFxsYluBq3VLyqXtE6b72YnszkbTAtFye4= k8s.io/kubectl v0.26.2/go.mod h1:KYWOXSwp2BrDn3kPeoU/uKzKtdqvhK1dgZGd0+no4cM= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= -sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= 
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/internal/controller/postgrescluster_controller.go b/internal/controller/postgrescluster_controller.go new file mode 100644 index 000000000..c33c016cb --- /dev/null +++ b/internal/controller/postgrescluster_controller.go @@ -0,0 +1,1541 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/

package controller

import (
	"context"
	"fmt"

	cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	enterprisev4 "github.com/splunk/splunk-operator/api/v4"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/ptr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	client "sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/event"
	logs "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// PostgresClusterReconciler reconciles PostgresCluster resources.
type PostgresClusterReconciler struct {
	// Client is the controller-runtime client used for all reads and writes.
	client.Client
	// Scheme is used to set owner references on managed child resources.
	Scheme *runtime.Scheme
}

// EffectiveClusterConfig holds the effective PostgresCluster spec and CNPG settings after class defaults are applied.
type EffectiveClusterConfig struct {
	// ClusterSpec is the PostgresCluster spec with PostgresClusterClass defaults merged in.
	ClusterSpec *enterprisev4.PostgresClusterSpec
	// ProvisionerConfig carries the class-level CNPG settings (e.g. connection pooler config).
	ProvisionerConfig *enterprisev4.CNPGConfig
}

// normalizedManagedRole holds only the fields this controller sets on a CNPG RoleConfiguration.
// CNPG's admission webhook populates defaults (ConnectionLimit: -1, Inherit: true) that would
// cause equality.Semantic.DeepEqual to always report a diff — we compare only what we own.
type normalizedManagedRole struct {
	Name           string
	Ensure         cnpgv1.EnsureOption
	Login          bool
	PasswordSecret string
}

// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresclusters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresclusters/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresclusters/finalizers,verbs=update
// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresclusterclasses,verbs=get;list;watch
// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=clusters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=clusters/status,verbs=get
// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=poolers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=poolers/status,verbs=get

// Reconcile drives PostgresCluster toward the desired CNPG resources and status.
+func (r *PostgresClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := logs.FromContext(ctx) + logger.Info("Reconciling PostgresCluster", "name", req.Name, "namespace", req.Namespace) + + var cnpgCluster *cnpgv1.Cluster + var poolerEnabled bool + var postgresSecretName string + secret := &corev1.Secret{} + + // Phase: ResourceFetch + postgresCluster := &enterprisev4.PostgresCluster{} + if getPGClusterErr := r.Get(ctx, req.NamespacedName, postgresCluster); getPGClusterErr != nil { + if apierrors.IsNotFound(getPGClusterErr) { + logger.Info("PostgresCluster deleted, skipping reconciliation") + return ctrl.Result{}, nil + } + logger.Error(getPGClusterErr, "Unable to fetch PostgresCluster") + return ctrl.Result{}, getPGClusterErr + } + persistedStatus := postgresCluster.Status.DeepCopy() + + if postgresCluster.Status.Resources == nil { + postgresCluster.Status.Resources = &enterprisev4.PostgresClusterResources{} + } + + // Keep condition and phase updates consistent across the reconcile flow. + updateStatus := func( + conditionType conditionTypes, + status metav1.ConditionStatus, + reason conditionReasons, + message string, + phase reconcileClusterPhases) { + r.updateStatus(postgresCluster, conditionType, status, reason, message, phase) + } + + // Phase: FinalizerHandling + // Handle deletion before any create or patch path so cleanup wins over reconciliation. 
+ finalizerErr := r.handleFinalizer(ctx, postgresCluster, secret, cnpgCluster) + if finalizerErr != nil { + if apierrors.IsNotFound(finalizerErr) { + logger.Info("PostgresCluster already deleted, skipping finalizer update") + return ctrl.Result{}, nil + } + + logger.Error(finalizerErr, "Failed to handle finalizer") + updateStatus( + clusterReady, + metav1.ConditionFalse, + reasonClusterDeleteFailed, + fmt.Sprintf("Failed to delete resources during cleanup: %v", finalizerErr), + failedClusterPhase) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, finalizerErr + } + + if postgresCluster.GetDeletionTimestamp() != nil { + logger.Info("PostgresCluster is being deleted, cleanup complete") + return ctrl.Result{}, nil + } + + // Register the finalizer before creating managed resources. + if !controllerutil.ContainsFinalizer(postgresCluster, postgresClusterFinalizerName) { + controllerutil.AddFinalizer(postgresCluster, postgresClusterFinalizerName) + if updateErr := r.Update(ctx, postgresCluster); updateErr != nil { + if apierrors.IsConflict(updateErr) { + logger.Info("Conflict while adding finalizer, will retry on next reconcile") + return ctrl.Result{Requeue: true}, nil + } + logger.Error(updateErr, "Failed to add finalizer to PostgresCluster") + return ctrl.Result{}, updateErr + } + logger.Info("Finalizer added successfully") + return ctrl.Result{Requeue: true}, nil + } + + // Phase: ClassResolution + postgresClusterClass := &enterprisev4.PostgresClusterClass{} + if getClusterClassErr := r.Get(ctx, client.ObjectKey{Name: postgresCluster.Spec.Class}, postgresClusterClass); getClusterClassErr != nil { + logger.Error(getClusterClassErr, "Unable to fetch referenced PostgresClusterClass", "className", postgresCluster.Spec.Class) + updateStatus( + clusterReady, + metav1.ConditionFalse, + reasonClusterClassNotFound, + fmt.Sprintf("ClusterClass %s not found: %v", postgresCluster.Spec.Class, getClusterClassErr), + failedClusterPhase) + 
_ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, getClusterClassErr + } + + // Phase: ConfigurationMerging + // Merge PostgresCluster overrides on top of PostgresClusterClass defaults. + mergedConfig, mergeErr := r.getMergedConfig(postgresClusterClass, postgresCluster) + if mergeErr != nil { + logger.Error(mergeErr, "Failed to merge PostgresCluster configuration") + updateStatus( + clusterReady, + metav1.ConditionFalse, + reasonInvalidConfiguration, + fmt.Sprintf("Failed to merge configuration: %v", mergeErr), + failedClusterPhase) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, mergeErr + } + + // Phase: CredentialProvisioning + // The superuser secret must exist before the CNPG Cluster can be created or updated. + if postgresCluster.Status.Resources != nil && postgresCluster.Status.Resources.SecretRef != nil { + postgresSecretName = postgresCluster.Status.Resources.SecretRef.Name + logger.Info("Using existing secret from status", "name", postgresSecretName) + } else { + postgresSecretName = fmt.Sprintf("%s%s", postgresCluster.Name, defaultSecretSuffix) + logger.Info("Generating new secret name", "name", postgresSecretName) + } + postgresClusterSecretExists, secretExistErr := r.clusterSecretExists(ctx, postgresCluster.Namespace, postgresSecretName, secret) + if secretExistErr != nil { + logger.Error(secretExistErr, "Failed to check if PostgresCluster secret exists", "name", postgresSecretName) + updateStatus( + clusterReady, + metav1.ConditionFalse, + reasonUserSecretFailed, + fmt.Sprintf("Failed to check secret existence: %v", secretExistErr), + failedClusterPhase) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, secretExistErr + } + + if !postgresClusterSecretExists { + logger.Info("Creating PostgresCluster secret", "name", postgresSecretName) + if generateSecretErr := r.generateSecret(ctx, postgresCluster, postgresSecretName); 
generateSecretErr != nil { + logger.Error(generateSecretErr, "Failed to ensure PostgresCluster secret", "name", postgresSecretName) + updateStatus( + clusterReady, + metav1.ConditionFalse, + reasonUserSecretFailed, + fmt.Sprintf("Failed to generate PostgresCluster secret: %v", generateSecretErr), + failedClusterPhase) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, generateSecretErr + } + logger.Info("PostgresCluster secret created successfully", "name", postgresSecretName) + } + + // Re-link an existing secret if its owner reference was removed. + if postgresClusterSecretExists { + restoredSecretOwnerRef, restoreErr := r.restoreOwnerRef(ctx, postgresCluster, secret, "Secret") + if restoreErr != nil { + logger.Error(restoreErr, "Failed to restore owner reference on Secret") + updateStatus( + clusterReady, + metav1.ConditionFalse, + reasonSuperUserSecretFailed, + fmt.Sprintf("Failed to link existing secret: %v", restoreErr), + failedClusterPhase) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, restoreErr + } + if restoredSecretOwnerRef { + logger.Info("Existing secret linked successfully") + } + } + + if postgresCluster.Status.Resources == nil { + postgresCluster.Status.Resources = &enterprisev4.PostgresClusterResources{} + } + if postgresCluster.Status.Resources.SecretRef == nil { + postgresCluster.Status.Resources.SecretRef = &corev1.LocalObjectReference{Name: postgresSecretName} + return r.persistStatus(ctx, postgresCluster, persistedStatus) + } + + // Phase: ClusterSpecConstruction + desiredSpec := r.buildCNPGClusterSpec(mergedConfig, postgresSecretName) + + // Phase: ClusterReconciliation + // Create the CNPG Cluster on first reconcile, otherwise compare and patch drift. 
+ existingCNPG := &cnpgv1.Cluster{} + getErr := r.Get(ctx, types.NamespacedName{Name: postgresCluster.Name, Namespace: postgresCluster.Namespace}, existingCNPG) + + if apierrors.IsNotFound(getErr) { + // CNPG Cluster doesn't exist yet. Create it and return so status can be observed on the next pass. + logger.Info("CNPG Cluster not found, creating", "name", postgresCluster.Name) + newCluster := r.buildCNPGCluster(postgresCluster, mergedConfig, postgresSecretName) + if createErr := r.Create(ctx, newCluster); createErr != nil { + logger.Error(createErr, "Failed to create CNPG Cluster") + updateStatus( + clusterReady, + metav1.ConditionFalse, + reasonClusterBuildFailed, + fmt.Sprintf("Failed to create CNPG Cluster: %v", createErr), + failedClusterPhase) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, createErr + } + + updateStatus( + clusterReady, + metav1.ConditionFalse, + reasonClusterBuildSucceeded, + "CNPG Cluster created", + provisioningClusterPhase) + if result, persistErr := r.persistStatus(ctx, postgresCluster, persistedStatus); persistErr != nil || result != (ctrl.Result{}) { + return result, persistErr + } + logger.Info("CNPG Cluster created successfully,", "name", postgresCluster.Name) + return ctrl.Result{}, nil + + } + if getErr != nil { + logger.Error(getErr, "Failed to get CNPG Cluster") + updateStatus( + clusterReady, + metav1.ConditionFalse, + reasonClusterGetFailed, + fmt.Sprintf("Failed to get CNPG Cluster: %v", getErr), + failedClusterPhase) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, getErr + } + + cnpgCluster = existingCNPG + // Re-link an existing CNPG Cluster if its owner reference was removed. 
+ if restoredClusterOwnerRef, restoreErr := r.restoreOwnerRef(ctx, postgresCluster, cnpgCluster, "CNPGCluster"); restoreErr != nil { + logger.Error(restoreErr, "Failed to restore owner reference on CNPG Cluster") + updateStatus( + clusterReady, + metav1.ConditionFalse, + reasonClusterPatchFailed, + fmt.Sprintf("Failed to link existing CNPG Cluster: %v", restoreErr), + failedClusterPhase) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, restoreErr + } else if restoredClusterOwnerRef { + logger.Info("Existing CNPG Cluster linked successfully", "cluster", cnpgCluster.Name) + } + + // Patch the CNPG Cluster when the live spec differs from the desired spec. + currentNormalizedSpec := normalizeCNPGClusterSpec(cnpgCluster.Spec, mergedConfig.ClusterSpec.PostgreSQLConfig) + desiredNormalizedSpec := normalizeCNPGClusterSpec(desiredSpec, mergedConfig.ClusterSpec.PostgreSQLConfig) + + if !equality.Semantic.DeepEqual(currentNormalizedSpec, desiredNormalizedSpec) { + logger.Info("Detected drift in CNPG Cluster spec, patching", "name", cnpgCluster.Name) + originalCluster := cnpgCluster.DeepCopy() + cnpgCluster.Spec = desiredSpec + if patchErr := r.patchObject(ctx, originalCluster, cnpgCluster, "CNPGCluster"); patchErr != nil { + if apierrors.IsConflict(patchErr) { + logger.Info("Conflict occurred while updating CNPG Cluster, requeueing", "name", cnpgCluster.Name) + return ctrl.Result{Requeue: true}, nil + } + logger.Error(patchErr, "Failed to patch CNPG Cluster", "name", cnpgCluster.Name) + updateStatus( + clusterReady, + metav1.ConditionFalse, + reasonClusterPatchFailed, + fmt.Sprintf("Failed to patch CNPG Cluster: %v", patchErr), + failedClusterPhase) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, patchErr + } + logger.Info("CNPG Cluster patched successfully", "name", cnpgCluster.Name) + return ctrl.Result{}, nil + } + + // Phase: ManagedRoleReconciliation + if managedRolesErr := 
r.reconcileManagedRoles(ctx, postgresCluster, cnpgCluster); managedRolesErr != nil { + logger.Error(managedRolesErr, "Failed to reconcile managed roles") + updateStatus( + clusterReady, + metav1.ConditionFalse, + reasonManagedRolesFailed, + fmt.Sprintf("Failed to reconcile managed roles: %v", managedRolesErr), + failedClusterPhase) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, managedRolesErr + } + + // Phase: ClusterStatusProjection + // Project CNPG status before later phase-specific early returns so cluster status stays current. + clusterConditionStatus, clusterReason, clusterMessage, clusterPhase := r.syncStatus(postgresCluster, cnpgCluster) + + logger.Info( + "Mapped CNPG status to PostgresCluster", "cnpgPhase", + cnpgCluster.Status.Phase, "postgresClusterPhase", + clusterPhase, "conditionStatus", + clusterConditionStatus, "reason", + clusterReason, "message", + clusterMessage) + + updateStatus( + clusterReady, + clusterConditionStatus, + clusterReason, + clusterMessage, + clusterPhase, + ) + + // Phase: PoolerReconciliation + poolerEnabled = mergedConfig.ClusterSpec.ConnectionPoolerEnabled != nil && *mergedConfig.ClusterSpec.ConnectionPoolerEnabled + if poolerEnabled { + if mergedConfig.ProvisionerConfig.ConnectionPooler == nil { + logger.Info("Connection pooler enabled but no config found in class or cluster spec", + "class", postgresCluster.Spec.Class, + "cluster", postgresCluster.Name, + ) + updateStatus( + poolerReady, + metav1.ConditionFalse, + reasonPoolerConfigMissing, + fmt.Sprintf("Connection pooler is enabled but no config found in class %q or cluster %q", + postgresCluster.Spec.Class, postgresCluster.Name), + failedClusterPhase, + ) + return r.persistStatus(ctx, postgresCluster, persistedStatus) + } + if createPoolerErr := r.createConnectionPoolers(ctx, postgresCluster, mergedConfig, cnpgCluster); createPoolerErr != nil { + logger.Error(createPoolerErr, "Failed to create connection poolers") + 
updateStatus( + poolerReady, + metav1.ConditionFalse, + reasonPoolerReconciliationFailed, + fmt.Sprintf("Failed to create connection poolers: %v", createPoolerErr), + failedClusterPhase, + ) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, createPoolerErr + } + + rwPooler := &cnpgv1.Pooler{} + rwErr := r.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(postgresCluster.Name, readWriteEndpoint), + Namespace: postgresCluster.Namespace, + }, rwPooler) + roPooler := &cnpgv1.Pooler{} + roErr := r.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(postgresCluster.Name, readOnlyEndpoint), + Namespace: postgresCluster.Namespace, + }, roPooler) + if rwErr != nil || roErr != nil || !r.arePoolersReady(rwPooler, roPooler) { + logger.Info("Connection poolers are not ready yet, requeueing") + updateStatus( + poolerReady, + metav1.ConditionFalse, + reasonPoolerCreating, + "Connection poolers are being provisioned", + provisioningClusterPhase, + ) + if result, persistErr := r.persistStatus(ctx, postgresCluster, persistedStatus); persistErr != nil || result != (ctrl.Result{}) { + return result, persistErr + } + return ctrl.Result{RequeueAfter: retryDelay}, nil + } + + message, err := r.syncPoolerStatus(ctx, postgresCluster) + if err != nil { + updateStatus( + poolerReady, + metav1.ConditionFalse, + reasonPoolerReconciliationFailed, + fmt.Sprintf("Failed to sync pooler status: %v", err), + failedClusterPhase, + ) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, err + } + + updateStatus( + poolerReady, + metav1.ConditionTrue, + reasonAllInstancesReady, + fmt.Sprintf("All connection poolers are ready: %s", message), + clusterPhase, + ) + } else { + if err := r.deleteConnectionPoolers(ctx, postgresCluster); err != nil { + logger.Error(err, "Failed to delete connection poolers") + updateStatus( + poolerReady, + metav1.ConditionFalse, + reasonPoolerReconciliationFailed, + "Failed to 
delete connection poolers", + failedClusterPhase, + ) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, err + } + if r.poolerExists(ctx, postgresCluster, readWriteEndpoint) || r.poolerExists(ctx, postgresCluster, readOnlyEndpoint) { + updateStatus( + poolerReady, + metav1.ConditionFalse, + reasonPoolerCreating, + "Connection poolers are being deleted", + provisioningClusterPhase, + ) + if result, persistErr := r.persistStatus(ctx, postgresCluster, persistedStatus); persistErr != nil || result != (ctrl.Result{}) { + return result, persistErr + } + return ctrl.Result{RequeueAfter: retryDelay}, nil + } + postgresCluster.Status.ConnectionPoolerStatus = nil + meta.RemoveStatusCondition(&postgresCluster.Status.Conditions, string(poolerReady)) + } + + // Phase: ConnectionMetadata + // Publish connection details after the cluster and optional poolers reach the desired state. + desiredConfigMap, err := r.generateConfigMap(postgresCluster, postgresSecretName, poolerEnabled) + if err != nil { + logger.Error(err, "Failed to generate ConfigMap") + updateStatus( + configMapReady, + metav1.ConditionFalse, + reasonConfigMapFailed, + fmt.Sprintf("Failed to generate ConfigMap: %v", err), + failedClusterPhase, + ) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, err + } + + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: desiredConfigMap.Name, + Namespace: desiredConfigMap.Namespace, + }, + } + + createOrUpdateResult, err := controllerutil.CreateOrUpdate(ctx, r.Client, configMap, func() error { + configMap.Data = desiredConfigMap.Data + configMap.Labels = desiredConfigMap.Labels + configMap.Annotations = desiredConfigMap.Annotations + + if !metav1.IsControlledBy(configMap, postgresCluster) { + if err := ctrl.SetControllerReference(postgresCluster, configMap, r.Scheme); err != nil { + return fmt.Errorf("set controller reference failed: %w", err) + } + } + return nil + }) + 
if err != nil { + logger.Error(err, "Failed to reconcile ConfigMap", "name", desiredConfigMap.Name) + updateStatus( + configMapReady, + metav1.ConditionFalse, + reasonConfigMapFailed, + fmt.Sprintf("Failed to reconcile ConfigMap: %v", err), + failedClusterPhase, + ) + _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) + return ctrl.Result{}, err + } + + switch createOrUpdateResult { + case controllerutil.OperationResultCreated: + logger.Info("ConfigMap created", "name", desiredConfigMap.Name) + case controllerutil.OperationResultUpdated: + logger.Info("ConfigMap updated", "name", desiredConfigMap.Name) + case controllerutil.OperationResultNone: + logger.Info("ConfigMap unchanged", "name", desiredConfigMap.Name) + } + + if postgresCluster.Status.Resources.ConfigMapRef == nil || + postgresCluster.Status.Resources.ConfigMapRef.Name != desiredConfigMap.Name { + postgresCluster.Status.Resources.ConfigMapRef = &corev1.LocalObjectReference{Name: desiredConfigMap.Name} + logger.Info("ConfigMap reference updated in status", "configMap", desiredConfigMap.Name) + } + // Phase: ReadyStatus + // Persist the final ConfigMap status update and finish the reconcile pass. + updateStatus( + configMapReady, + metav1.ConditionTrue, + reasonConfigMapsCreated, + fmt.Sprintf("ConfigMap is ready: %s", desiredConfigMap.Name), + clusterPhase, + ) + if result, persistErr := r.persistStatus(ctx, postgresCluster, persistedStatus); persistErr != nil || result != (ctrl.Result{}) { + return result, persistErr + } + logger.Info("Reconciliation complete") + return ctrl.Result{}, nil +} + +// getMergedConfig applies PostgresClusterClass defaults and validates the required resulting fields. 
+func (r *PostgresClusterReconciler) getMergedConfig(clusterClass *enterprisev4.PostgresClusterClass, cluster *enterprisev4.PostgresCluster) (*EffectiveClusterConfig, error) { + resultConfig := cluster.Spec.DeepCopy() + classDefaults := clusterClass.Spec.Config + + if resultConfig.Instances == nil { + resultConfig.Instances = classDefaults.Instances + } + if resultConfig.PostgresVersion == nil { + resultConfig.PostgresVersion = classDefaults.PostgresVersion + } + if resultConfig.Resources == nil { + resultConfig.Resources = classDefaults.Resources + } + if resultConfig.Storage == nil { + resultConfig.Storage = classDefaults.Storage + } + if len(resultConfig.PostgreSQLConfig) == 0 { + resultConfig.PostgreSQLConfig = classDefaults.PostgreSQLConfig + } + if len(resultConfig.PgHBA) == 0 { + resultConfig.PgHBA = classDefaults.PgHBA + } + + if resultConfig.Instances == nil || resultConfig.PostgresVersion == nil || resultConfig.Storage == nil { + return nil, fmt.Errorf("invalid configuration for class %s: instances, postgresVersion and storage are required", clusterClass.Name) + } + + if resultConfig.PostgreSQLConfig == nil { + resultConfig.PostgreSQLConfig = make(map[string]string) + } + if resultConfig.PgHBA == nil { + resultConfig.PgHBA = make([]string, 0) + } + if resultConfig.Resources == nil { + resultConfig.Resources = &corev1.ResourceRequirements{} + } + + return &EffectiveClusterConfig{ + ClusterSpec: resultConfig, + ProvisionerConfig: clusterClass.Spec.CNPG, + }, nil +} + +// buildCNPGClusterSpec builds the desired CNPG ClusterSpec from the merged configuration. +// IMPORTANT: any field added here must also be added to normalizedCNPGClusterSpec and normalizeCNPGClusterSpec, +// otherwise it will not be included in drift detection and changes will be silently ignored. +func (r *PostgresClusterReconciler) buildCNPGClusterSpec(mergedConfig *EffectiveClusterConfig, secretName string) cnpgv1.ClusterSpec { + + // 3. 
Build the Spec + spec := cnpgv1.ClusterSpec{ + ImageName: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s", *mergedConfig.ClusterSpec.PostgresVersion), + Instances: int(*mergedConfig.ClusterSpec.Instances), + PostgresConfiguration: cnpgv1.PostgresConfiguration{ + Parameters: mergedConfig.ClusterSpec.PostgreSQLConfig, + PgHBA: mergedConfig.ClusterSpec.PgHBA, + }, + SuperuserSecret: &cnpgv1.LocalObjectReference{ + Name: secretName, + }, + EnableSuperuserAccess: ptr.To(true), + + Bootstrap: &cnpgv1.BootstrapConfiguration{ + InitDB: &cnpgv1.BootstrapInitDB{ + Database: defaultDatabaseName, + Owner: superUsername, + Secret: &cnpgv1.LocalObjectReference{ + Name: secretName, + }, + }, + }, + StorageConfiguration: cnpgv1.StorageConfiguration{ + Size: mergedConfig.ClusterSpec.Storage.String(), + }, + Resources: *mergedConfig.ClusterSpec.Resources, + } + + return spec +} + +// buildCNPGCluster builds the CNPG Cluster object for the merged PostgresCluster configuration. +func (r *PostgresClusterReconciler) buildCNPGCluster( + postgresCluster *enterprisev4.PostgresCluster, + mergedConfig *EffectiveClusterConfig, + secretName string, +) *cnpgv1.Cluster { + cnpgCluster := &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: postgresCluster.Name, + Namespace: postgresCluster.Namespace, + }, + Spec: r.buildCNPGClusterSpec(mergedConfig, secretName), + } + ctrl.SetControllerReference(postgresCluster, cnpgCluster, r.Scheme) + return cnpgCluster +} + +// poolerResourceName returns the CNPG Pooler resource name for a given cluster and type (rw/ro). +func poolerResourceName(clusterName, poolerType string) string { + return fmt.Sprintf("%s%s%s", clusterName, defaultPoolerSuffix, poolerType) +} + +// createConnectionPoolers ensures both RW and RO CNPG Pooler resources exist by creating missing poolers. 
+func (r *PostgresClusterReconciler) createConnectionPoolers( + ctx context.Context, + postgresCluster *enterprisev4.PostgresCluster, + mergedConfig *EffectiveClusterConfig, + cnpgCluster *cnpgv1.Cluster, +) error { + // Ensure the RW pooler exists. + if err := r.createConnectionPooler(ctx, postgresCluster, mergedConfig, cnpgCluster, readWriteEndpoint); err != nil { + return fmt.Errorf("failed to reconcile RW pooler: %w", err) + } + + // Ensure the RO pooler exists. + if err := r.createConnectionPooler(ctx, postgresCluster, mergedConfig, cnpgCluster, readOnlyEndpoint); err != nil { + return fmt.Errorf("failed to reconcile RO pooler: %w", err) + } + + return nil +} + +// poolerExists reports whether the named pooler resource exists. +func (r *PostgresClusterReconciler) poolerExists(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster, poolerType string) bool { + pooler := &cnpgv1.Pooler{} + err := r.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(postgresCluster.Name, poolerType), + Namespace: postgresCluster.Namespace, + }, pooler) + + if apierrors.IsNotFound(err) { + return false + } + if err != nil { + logs.FromContext(ctx).Error(err, "Failed to check pooler existence", "type", poolerType) + return false + } + return true +} + +// deleteConnectionPoolers removes RW and RO pooler resources if they exist. 
+func (r *PostgresClusterReconciler) deleteConnectionPoolers(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster) error { + logger := logs.FromContext(ctx) + + for _, poolerType := range []string{readWriteEndpoint, readOnlyEndpoint} { + poolerName := poolerResourceName(postgresCluster.Name, poolerType) + exists := r.poolerExists(ctx, postgresCluster, poolerType) + if !exists { + continue + } + + pooler := &cnpgv1.Pooler{} + if err := r.Get(ctx, types.NamespacedName{ + Name: poolerName, + Namespace: postgresCluster.Namespace, + }, pooler); err != nil { + if apierrors.IsNotFound(err) { + continue + } + return fmt.Errorf("failed to get pooler %s: %w", poolerName, err) + } + + logger.Info("Deleting CNPG Pooler", "name", poolerName) + if err := r.Delete(ctx, pooler); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete pooler %s: %w", poolerName, err) + } + } + return nil +} + +// createConnectionPooler creates a CNPG Pooler resource when it is missing. +// Existing poolers are left unchanged by design. +func (r *PostgresClusterReconciler) createConnectionPooler( + ctx context.Context, + postgresCluster *enterprisev4.PostgresCluster, + mergedConfig *EffectiveClusterConfig, + cnpgCluster *cnpgv1.Cluster, + poolerType string, +) error { + poolerName := poolerResourceName(postgresCluster.Name, poolerType) + + existingPooler := &cnpgv1.Pooler{} + err := r.Get(ctx, types.NamespacedName{ + Name: poolerName, + Namespace: postgresCluster.Namespace, + }, existingPooler) + + if apierrors.IsNotFound(err) { + logs.FromContext(ctx).Info("Creating CNPG Pooler", "name", poolerName, "type", poolerType) + pooler := r.buildCNPGPooler(postgresCluster, mergedConfig, cnpgCluster, poolerType) + return r.Create(ctx, pooler) + } + + return err +} + +// buildCNPGPooler builds the desired CNPG Pooler object for the given pooler type. 
func (r *PostgresClusterReconciler) buildCNPGPooler(
	postgresCluster *enterprisev4.PostgresCluster,
	mergedConfig *EffectiveClusterConfig,
	cnpgCluster *cnpgv1.Cluster,
	poolerType string,
) *cnpgv1.Pooler {
	cfg := mergedConfig.ProvisionerConfig.ConnectionPooler
	poolerName := poolerResourceName(postgresCluster.Name, poolerType)

	// NOTE(review): assumes cfg, cfg.Instances and cfg.Mode are all non-nil —
	// presumably guaranteed by PostgresClusterClass defaulting/validation; confirm upstream,
	// otherwise this panics when pooling config is absent.
	instances := *cfg.Instances
	mode := cnpgv1.PgBouncerPoolMode(*cfg.Mode)

	pooler := &cnpgv1.Pooler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      poolerName,
			Namespace: postgresCluster.Namespace,
		},
		Spec: cnpgv1.PoolerSpec{
			// Bind the pooler to the CNPG cluster it fronts.
			Cluster: cnpgv1.LocalObjectReference{
				Name: cnpgCluster.Name,
			},
			Instances: &instances,
			Type:      cnpgv1.PoolerType(poolerType),
			PgBouncer: &cnpgv1.PgBouncerSpec{
				PoolMode:   mode,
				Parameters: cfg.Config,
			},
		},
	}

	// NOTE(review): SetControllerReference error is ignored here; it can only occur
	// with a misconfigured Scheme — confirm both types are registered at startup.
	ctrl.SetControllerReference(postgresCluster, pooler, r.Scheme)
	return pooler
}

// syncStatus maps CNPG Cluster state onto PostgresCluster status and refreshes ProvisionerRef.
func (r *PostgresClusterReconciler) syncStatus(
	postgresCluster *enterprisev4.PostgresCluster,
	cnpgCluster *cnpgv1.Cluster,
) (metav1.ConditionStatus, conditionReasons, string, reconcileClusterPhases) {
	// Always record a reference to the backing CNPG Cluster so it can be located from status.
	postgresCluster.Status.ProvisionerRef = &corev1.ObjectReference{
		APIVersion: "postgresql.cnpg.io/v1",
		Kind:       "Cluster",
		Namespace:  cnpgCluster.Namespace,
		Name:       cnpgCluster.Name,
		UID:        cnpgCluster.UID,
	}

	// Map CNPG Phase to PostgresCluster Phase/Conditions
	var clusterPhase reconcileClusterPhases
	var conditionStatus metav1.ConditionStatus
	var reason conditionReasons
	var message string

	switch cnpgCluster.Status.Phase {
	// Healthy: the only phase that maps to a True condition.
	case cnpgv1.PhaseHealthy:
		clusterPhase = readyClusterPhase
		conditionStatus = metav1.ConditionTrue
		reason = reasonCNPGClusterHealthy
		message = "Cluster is up and running"

	// Initial provisioning phases.
	case cnpgv1.PhaseFirstPrimary,
		cnpgv1.PhaseCreatingReplica,
		cnpgv1.PhaseWaitingForInstancesToBeActive:
		clusterPhase = provisioningClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGProvisioning
		message = fmt.Sprintf("CNPG cluster provisioning: %s", cnpgCluster.Status.Phase)

	case cnpgv1.PhaseSwitchover:
		clusterPhase = configuringClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGSwitchover
		message = "Cluster changing primary node"

	case cnpgv1.PhaseFailOver:
		clusterPhase = configuringClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGFailingOver
		message = "Pod missing, need to change primary"

	case cnpgv1.PhaseInplacePrimaryRestart,
		cnpgv1.PhaseInplaceDeletePrimaryRestart:
		clusterPhase = configuringClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGRestarting
		message = fmt.Sprintf("CNPG cluster restarting: %s", cnpgCluster.Status.Phase)

	case cnpgv1.PhaseUpgrade,
		cnpgv1.PhaseMajorUpgrade,
		cnpgv1.PhaseUpgradeDelayed,
		cnpgv1.PhaseOnlineUpgrading:
		clusterPhase = configuringClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGUpgrading
		message = fmt.Sprintf("CNPG cluster upgrading: %s", cnpgCluster.Status.Phase)

	case cnpgv1.PhaseApplyingConfiguration:
		clusterPhase = configuringClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGApplyingConfig
		message = "Configuration change is being applied"

	case cnpgv1.PhaseReplicaClusterPromotion:
		clusterPhase = configuringClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGPromoting
		message = "Replica is being promoted to primary"

	// Terminal / operator-attention phases map to the failed cluster phase.
	case cnpgv1.PhaseWaitingForUser:
		clusterPhase = failedClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGWaitingForUser
		message = "Action from the user is required"

	case cnpgv1.PhaseUnrecoverable:
		clusterPhase = failedClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGUnrecoverable
		message = "Cluster failed, needs manual intervention"

	case cnpgv1.PhaseCannotCreateClusterObjects:
		clusterPhase = failedClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGProvisioningFailed
		message = "Cluster resources cannot be created"

	case cnpgv1.PhaseUnknownPlugin,
		cnpgv1.PhaseFailurePlugin:
		clusterPhase = failedClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGPluginError
		message = fmt.Sprintf("CNPG plugin error: %s", cnpgCluster.Status.Phase)

	case cnpgv1.PhaseImageCatalogError,
		cnpgv1.PhaseArchitectureBinaryMissing:
		clusterPhase = failedClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGImageError
		message = fmt.Sprintf("CNPG image error: %s", cnpgCluster.Status.Phase)

	// Empty phase: CNPG has not started processing the cluster yet.
	case "":
		clusterPhase = pendingClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGProvisioning
		message = "CNPG cluster is pending creation"

	// Unknown phases are treated conservatively as provisioning.
	default:
		clusterPhase = provisioningClusterPhase
		conditionStatus = metav1.ConditionFalse
		reason = reasonCNPGProvisioning
		message = fmt.Sprintf("CNPG cluster clusterPhase: %s", cnpgCluster.Status.Phase)
	}
	return conditionStatus, reason, message, clusterPhase

}

// updateStatus is a convenience wrapper that updates a condition and the phase together.
// For cases where you need to update multiple conditions before persisting, use updateCondition instead.
func (r *PostgresClusterReconciler) updateStatus(
	postgresCluster *enterprisev4.PostgresCluster,
	conditionType conditionTypes,
	status metav1.ConditionStatus,
	reason conditionReasons,
	message string,
	phase reconcileClusterPhases,
) {
	r.updateCondition(postgresCluster, conditionType, status, reason, message)
	postgresCluster.Status.Phase = string(phase)
}

// updateCondition updates a single status condition in memory without persisting.
// Call persistStatus after updating all desired conditions.
+func (r *PostgresClusterReconciler) updateCondition( + postgresCluster *enterprisev4.PostgresCluster, + conditionType conditionTypes, + status metav1.ConditionStatus, + reason conditionReasons, + message string, +) { + meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ + Type: string(conditionType), + Status: status, + Reason: string(reason), + Message: message, + ObservedGeneration: postgresCluster.Generation, + }) +} + +// persistStatus persists status changes and converts update conflicts into reconcile retries. +func (r *PostgresClusterReconciler) persistStatus( + ctx context.Context, + postgresCluster *enterprisev4.PostgresCluster, + persistedStatus *enterprisev4.PostgresClusterStatus, +) (ctrl.Result, error) { + if persistErr := r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus); persistErr != nil { + if apierrors.IsConflict(persistErr) { + logs.FromContext(ctx).Info("Conflict while updating status, will retry on next reconcile") + return ctrl.Result{Requeue: true}, nil + } + return ctrl.Result{}, persistErr + } + return ctrl.Result{}, nil +} + +// persistStatusIfChanged updates status only when it differs from the last persisted snapshot. +func (r *PostgresClusterReconciler) persistStatusIfChanged(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster, lastPostgresClusterStatus *enterprisev4.PostgresClusterStatus) error { + if !equality.Semantic.DeepEqual(postgresCluster.Status, *lastPostgresClusterStatus) { + if err := r.Status().Update(ctx, postgresCluster); err != nil { + return err + } + *lastPostgresClusterStatus = *postgresCluster.Status.DeepCopy() + } + return nil +} + +// syncPoolerStatus populates ConnectionPoolerStatus and returns a summary message. +// Callers are responsible for updating PoolerReady after this succeeds. 
+func (r *PostgresClusterReconciler) syncPoolerStatus(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster) (string, error) { + rwPooler := &cnpgv1.Pooler{} + if rwErr := r.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(postgresCluster.Name, readWriteEndpoint), + Namespace: postgresCluster.Namespace, + }, rwPooler); rwErr != nil { + return "", rwErr + } + + roPooler := &cnpgv1.Pooler{} + if roErr := r.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(postgresCluster.Name, readOnlyEndpoint), + Namespace: postgresCluster.Namespace, + }, roPooler); roErr != nil { + return "", roErr + } + + postgresCluster.Status.ConnectionPoolerStatus = &enterprisev4.ConnectionPoolerStatus{ + Enabled: true, + } + + rwDesired, rwScheduled := r.getPoolerInstanceCount(rwPooler) + roDesired, roScheduled := r.getPoolerInstanceCount(roPooler) + + return fmt.Sprintf("%s: %d/%d, %s: %d/%d", + readWriteEndpoint, rwScheduled, rwDesired, + readOnlyEndpoint, roScheduled, roDesired, + ), nil +} + +// isPoolerReady checks if a pooler has all instances scheduled. +// Note: CNPG PoolerStatus only tracks scheduled instances, not ready pods. +func (r *PostgresClusterReconciler) isPoolerReady(pooler *cnpgv1.Pooler) bool { + desiredInstances := int32(1) + if pooler.Spec.Instances != nil { + desiredInstances = *pooler.Spec.Instances + } + return pooler.Status.Instances >= desiredInstances +} + +// getPoolerInstanceCount returns the number of scheduled instances for a pooler. +func (r *PostgresClusterReconciler) getPoolerInstanceCount(pooler *cnpgv1.Pooler) (desired int32, scheduled int32) { + desired = int32(1) + if pooler.Spec.Instances != nil { + desired = *pooler.Spec.Instances + } + return desired, pooler.Status.Instances +} + +// arePoolersReady checks if both RW and RO poolers have all instances scheduled. 
+func (r *PostgresClusterReconciler) arePoolersReady(rwPooler, roPooler *cnpgv1.Pooler) bool { + return r.isPoolerReady(rwPooler) && r.isPoolerReady(roPooler) +} + +// normalizeManagedRole projects a CNPG RoleConfiguration down to only the fields this controller controls. +// CNPG's admission webhook populates defaults on the live object (ConnectionLimit: -1, Inherit: true) +// that are absent from our desired slice — normalizing both sides before comparison prevents a +// permanent diff that would re-patch on every reconcile. +func normalizeManagedRole(r cnpgv1.RoleConfiguration) normalizedManagedRole { + secret := "" + if r.PasswordSecret != nil { + secret = r.PasswordSecret.Name + } + return normalizedManagedRole{ + Name: r.Name, + Ensure: r.Ensure, + Login: r.Login, + PasswordSecret: secret, + } +} + +// normalizeManagedRoles applies normalizeManagedRole to each RoleConfiguration in the slice. +func normalizeManagedRoles(roles []cnpgv1.RoleConfiguration) []normalizedManagedRole { + result := make([]normalizedManagedRole, 0, len(roles)) + for _, r := range roles { + result = append(result, normalizeManagedRole(r)) + } + return result +} + +// buildCNPGRole converts a single PostgresCluster ManagedRole to its CNPG RoleConfiguration equivalent. +// Absent roles are marked for removal; Login is only meaningful for present roles. +func buildCNPGRole(role enterprisev4.ManagedRole) cnpgv1.RoleConfiguration { + cnpgRole := cnpgv1.RoleConfiguration{ + Name: role.Name, + } + if role.Ensure == "absent" { + cnpgRole.Ensure = cnpgv1.EnsureAbsent + } else { + cnpgRole.Ensure = cnpgv1.EnsurePresent + cnpgRole.Login = true + } + if role.PasswordSecretRef != nil { + cnpgRole.PasswordSecret = &cnpgv1.LocalObjectReference{Name: role.PasswordSecretRef.Name} + } + return cnpgRole +} + +// reconcileManagedRoles synchronizes ManagedRoles from PostgresCluster spec to CNPG Cluster managed.roles. 
+func (r *PostgresClusterReconciler) reconcileManagedRoles(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster, cnpgCluster *cnpgv1.Cluster) error { + logger := logs.FromContext(ctx) + + desired := make([]cnpgv1.RoleConfiguration, 0, len(postgresCluster.Spec.ManagedRoles)) + for _, role := range postgresCluster.Spec.ManagedRoles { + desired = append(desired, buildCNPGRole(role)) + } + + var current []cnpgv1.RoleConfiguration + if cnpgCluster.Spec.Managed != nil { + current = cnpgCluster.Spec.Managed.Roles + } + + if equality.Semantic.DeepEqual(normalizeManagedRoles(current), normalizeManagedRoles(desired)) { + logger.Info("CNPG Cluster roles already match desired state, no update needed") + return nil + } + + logger.Info("Detected drift in managed roles, patching", "count", len(desired)) + originalCluster := cnpgCluster.DeepCopy() + if cnpgCluster.Spec.Managed == nil { + cnpgCluster.Spec.Managed = &cnpgv1.ManagedConfiguration{} + } + cnpgCluster.Spec.Managed.Roles = desired + + if err := r.patchObject(ctx, originalCluster, cnpgCluster, "CNPGCluster"); err != nil { + return fmt.Errorf("patching managed roles: %w", err) + } + + logger.Info("Successfully updated managed roles", "count", len(desired)) + return nil +} + +// normalizedCNPGClusterSpec is a subset of cnpgv1.ClusterSpec fields that we care about for drift detection. +// Any field that is included in buildCNPGClusterSpec and should be considered for drift detection must be added here, and populated in normalizeCNPGClusterSpec. 
+func normalizeCNPGClusterSpec(spec cnpgv1.ClusterSpec, customDefinedParameters map[string]string) normalizedCNPGClusterSpec { + normalizedConf := normalizedCNPGClusterSpec{ + ImageName: spec.ImageName, + Instances: spec.Instances, + // Parameters intentionally excluded — CNPG injects defaults that we don't change + StorageSize: spec.StorageConfiguration.Size, + Resources: spec.Resources, + } + + if len(customDefinedParameters) > 0 { + normalizedConf.CustomDefinedParameters = make(map[string]string) + for k := range customDefinedParameters { + normalizedConf.CustomDefinedParameters[k] = spec.PostgresConfiguration.Parameters[k] + } + } + if len(spec.PostgresConfiguration.PgHBA) > 0 { + normalizedConf.PgHBA = spec.PostgresConfiguration.PgHBA + } + + if spec.Bootstrap != nil && spec.Bootstrap.InitDB != nil { + normalizedConf.DefaultDatabase = spec.Bootstrap.InitDB.Database + normalizedConf.Owner = spec.Bootstrap.InitDB.Owner + } + return normalizedConf +} + +// generateConfigMap builds the desired ConfigMap with connection details for the PostgresCluster. 
+func (r *PostgresClusterReconciler) generateConfigMap( + postgresCluster *enterprisev4.PostgresCluster, + secretName string, + poolerEnabled bool, +) (*corev1.ConfigMap, error) { + configMapName := fmt.Sprintf("%s%s", postgresCluster.Name, defaultConfigMapSuffix) + if postgresCluster.Status.Resources != nil && postgresCluster.Status.Resources.ConfigMapRef != nil { + configMapName = postgresCluster.Status.Resources.ConfigMapRef.Name + } + + data := map[string]string{ + "CLUSTER_RW_ENDPOINT": fmt.Sprintf("%s-rw.%s", postgresCluster.Name, postgresCluster.Namespace), + "CLUSTER_RO_ENDPOINT": fmt.Sprintf("%s-ro.%s", postgresCluster.Name, postgresCluster.Namespace), + "CLUSTER_R_ENDPOINT": fmt.Sprintf("%s-r.%s", postgresCluster.Name, postgresCluster.Namespace), + "DEFAULT_CLUSTER_PORT": defaultPort, + "SUPER_USER_NAME": superUsername, + "SUPER_USER_SECRET_REF": secretName, + } + + if poolerEnabled { + data["CLUSTER_POOLER_RW_ENDPOINT"] = fmt.Sprintf("%s.%s", poolerResourceName(postgresCluster.Name, readWriteEndpoint), postgresCluster.Namespace) + data["CLUSTER_POOLER_RO_ENDPOINT"] = fmt.Sprintf("%s.%s", poolerResourceName(postgresCluster.Name, readOnlyEndpoint), postgresCluster.Namespace) + } + + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: configMapName, + Namespace: postgresCluster.Namespace, + Labels: map[string]string{"app.kubernetes.io/managed-by": "postgrescluster-controller"}, + }, + Data: data, + } + if err := ctrl.SetControllerReference(postgresCluster, configMap, r.Scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + return configMap, nil +} + +// generateSecret creates the superuser Secret when it is missing. 
+func (r *PostgresClusterReconciler) generateSecret(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster, secretName string) error { + existing := &corev1.Secret{} + err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: postgresCluster.Namespace}, existing) + + // If secret does not exist, create it + if apierrors.IsNotFound(err) { + password, err := generatePassword() + if err != nil { + return err + } + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: postgresCluster.Namespace, + }, + StringData: map[string]string{ + "username": superUsername, + "password": password, + }, + Type: corev1.SecretTypeOpaque, + } + // Set owner reference + if err := ctrl.SetControllerReference(postgresCluster, secret, r.Scheme); err != nil { + return err + } + if err := r.Create(ctx, secret); err != nil { + return err + } + } else if err != nil { + return err + } + return nil +} + +// deleteCNPGCluster deletes the CNPG Cluster resource if it exists. +func (r *PostgresClusterReconciler) deleteCNPGCluster(ctx context.Context, cnpgCluster *cnpgv1.Cluster) error { + logger := logs.FromContext(ctx) + // TODO: add logic to decide to delete cluster if one has customer DBs configured, to prevent data loss + if cnpgCluster == nil { + logger.Info("CNPG Cluster not found, skipping deletion") + return nil + } + logger.Info("Deleting CNPG Cluster", "name", cnpgCluster.Name) + if err := r.Delete(ctx, cnpgCluster); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete CNPG Cluster: %w", err) + } + return nil +} + +// handleFinalizer performs deletion-time cleanup and removes the finalizer when cleanup succeeds. 
+func (r *PostgresClusterReconciler) handleFinalizer(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster, secret *corev1.Secret, cnpgCluster *cnpgv1.Cluster) error { + logger := logs.FromContext(ctx) + if postgresCluster.GetDeletionTimestamp() == nil { + logger.Info("PostgresCluster not marked for deletion, skipping finalizer logic") + return nil + } + if !controllerutil.ContainsFinalizer(postgresCluster, postgresClusterFinalizerName) { + logger.Info("Finalizer not present on PostgresCluster, skipping finalizer logic") + return nil + } + if cnpgCluster == nil { + cnpgCluster = &cnpgv1.Cluster{} + } + + err := r.Get(ctx, types.NamespacedName{ + Name: postgresCluster.Name, + Namespace: postgresCluster.Namespace, + }, cnpgCluster) + if err != nil { + if apierrors.IsNotFound(err) { + cnpgCluster = nil + logger.Info("CNPG cluster not found during cleanup") + } else { + return fmt.Errorf("failed to fetch CNPG cluster during cleanup: %w", err) + } + } + logger.Info("Processing finalizer cleanup for PostgresCluster") + + // Always delete connection poolers if they exist. 
+ if err := r.deleteConnectionPoolers(ctx, postgresCluster); err != nil { + logger.Error(err, "Failed to delete connection poolers during cleanup") + return fmt.Errorf("failed to delete connection poolers: %w", err) + } + + switch postgresCluster.Spec.ClusterDeletionPolicy { + case clusterDeletionPolicyDelete: + logger.Info("ClusterDeletionPolicy is 'Delete', proceeding to delete CNPG Cluster and associated resources") + if cnpgCluster != nil { + if err := r.deleteCNPGCluster(ctx, cnpgCluster); err != nil { + logger.Error(err, "Failed to delete CNPG Cluster during finalizer cleanup") + return fmt.Errorf("failed to delete CNPG Cluster during finalizer cleanup: %w", err) + } + } + logger.Info("CNPG Cluster not found") + case clusterDeletionPolicyRetain: + logger.Info("ClusterDeletionPolicy is 'Retain', proceeding to remove owner references and retain CNPG Cluster") + // Remove owner reference from CNPG Cluster to prevent its deletion. + if cnpgCluster != nil { + originalCNPG := cnpgCluster.DeepCopy() + refRemoved, err := r.removeOwnerRef(postgresCluster, cnpgCluster, "CNPGCluster") + if err != nil { + return fmt.Errorf("failed to remove owner reference from CNPG cluster: %w", err) + } + if !refRemoved { + logger.Info("Owner reference already removed/not set from CNPG Cluster, skipping patch") + } + if err := r.patchObject(ctx, originalCNPG, cnpgCluster, "CNPGCluster"); err != nil { + return fmt.Errorf("failed to patch CNPG cluster after removing owner reference: %w", err) + } + logger.Info("Removed owner reference from CNPG Cluster") + } + // Remove owner reference from Secret to prevent its deletion. 
+ if postgresCluster.Status.Resources != nil && postgresCluster.Status.Resources.SecretRef != nil { + secretName := postgresCluster.Status.Resources.SecretRef.Name + if err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: postgresCluster.Namespace}, secret); err != nil { + if !apierrors.IsNotFound(err) { + logger.Error(err, "Failed to fetch Secret during cleanup") + return fmt.Errorf("failed to fetch secret during cleanup: %w", err) + } + logger.Info("Secret not found, skipping owner reference removal", "secret", secretName) + } + if secret != nil { + originalSecret := secret.DeepCopy() + refRemoved, err := r.removeOwnerRef(postgresCluster, secret, "Secret") + if err != nil { + return fmt.Errorf("failed to remove owner reference from Secret: %w", err) + } + if refRemoved { + if err := r.patchObject(ctx, originalSecret, secret, "Secret"); err != nil { + return fmt.Errorf("failed to patch Secret after removing owner reference: %w", err) + } + } + logger.Info("Removed owner reference from Secret") + } + } + default: + logger.Info("Unknown ClusterDeletionPolicy", "policy", postgresCluster.Spec.ClusterDeletionPolicy) + } + + // Remove finalizer after successful cleanup + controllerutil.RemoveFinalizer(postgresCluster, postgresClusterFinalizerName) + if err := r.Update(ctx, postgresCluster); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("PostgresCluster already deleted, skipping finalizer update") + return nil + } + logger.Error(err, "Failed to remove finalizer from PostgresCluster") + return fmt.Errorf("failed to remove finalizer: %w", err) + } + + logger.Info("Finalizer removed, cleanup complete") + return nil +} + +// clusterSecretExists returns whether the secret is present and propagates lookup errors. 
+func (r *PostgresClusterReconciler) clusterSecretExists(ctx context.Context, namespace, secretName string, secret *corev1.Secret) (clusterSecretExists bool, secretExistErr error) { + logger := logs.FromContext(ctx) + err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: namespace}, secret) + if apierrors.IsNotFound(err) { + return false, nil + } + if err != nil { + logger.Error(err, "Failed to check secret existence", "secret", secretName) + return false, err + } + logger.Info("Secret already exists", "secret", secretName) + return true, nil +} + +// removeOwnerRef removes the owner's reference from the object and reports whether it changed the object. +func (r *PostgresClusterReconciler) removeOwnerRef(owner client.Object, obj client.Object, objKind objectKind) (bool, error) { + hasOwnerRef, err := controllerutil.HasOwnerReference(obj.GetOwnerReferences(), owner, r.Scheme) + + if err != nil { + return false, fmt.Errorf("failed to check owner reference on %s: %w", objKind, err) + } + if !hasOwnerRef { + return false, nil + } + if err := controllerutil.RemoveOwnerReference(owner, obj, r.Scheme); err != nil { + return false, fmt.Errorf("failed to remove owner reference from %s: %w", objKind, err) + } + return true, nil +} + +// restoreOwnerRef adds the PostgresCluster owner reference back to an existing object when it is missing. 
func (r *PostgresClusterReconciler) restoreOwnerRef(ctx context.Context, owner client.Object, obj client.Object, objKind objectKind) (bool, error) {
	// Returns (true, nil) when the reference was added and patched,
	// (false, nil) when it was already present.
	hasOwnerRef, err := controllerutil.HasOwnerReference(obj.GetOwnerReferences(), owner, r.Scheme)
	if err != nil {
		return false, fmt.Errorf("failed to check owner reference on %s: %w", objKind, err)
	}
	if hasOwnerRef {
		return false, nil
	}

	logger := logs.FromContext(ctx)
	logger.Info("Connecting existing object to PostgresCluster by adding owner reference", "kind", objKind, "name", obj.GetName())

	// DeepCopyObject returns runtime.Object; assert back to client.Object for the patch base.
	originalObj, ok := obj.DeepCopyObject().(client.Object)
	if !ok {
		return false, fmt.Errorf("failed to deep copy %s object", objKind)
	}

	if err := ctrl.SetControllerReference(owner, obj, r.Scheme); err != nil {
		return false, fmt.Errorf("failed to set controller reference on existing %s: %w", objKind, err)
	}

	if err := r.patchObject(ctx, originalObj, obj, objKind); err != nil {
		return false, err
	}

	return true, nil
}

// patchObject applies a merge patch and treats NotFound as already converged.
func (r *PostgresClusterReconciler) patchObject(ctx context.Context, original client.Object, obj client.Object, objKind objectKind) error {
	logger := logs.FromContext(ctx)
	if err := r.Patch(ctx, obj, client.MergeFrom(original)); err != nil {
		// NotFound means the object is gone; there is nothing left to patch.
		if apierrors.IsNotFound(err) {
			logger.Info("Object not found, skipping patch", "kind", objKind, "name", obj.GetName())
			return nil
		}
		return fmt.Errorf("failed to patch %s object: %w", objKind, err)
	}
	logger.Info("Patched object successfully", "kind", objKind, "name", obj.GetName())
	return nil
}

// SetupWithManager registers the controller for PostgresCluster resources and owned CNPG Clusters.
func (r *PostgresClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// Per-type predicates keep noisy updates from triggering full reconciles.
	return ctrl.NewControllerManagedBy(mgr).
		For(&enterprisev4.PostgresCluster{}, builder.WithPredicates(postgresClusterPredicator())).
		Owns(&cnpgv1.Cluster{}, builder.WithPredicates(cnpgClusterPredicator())).
		Owns(&cnpgv1.Pooler{}, builder.WithPredicates(cnpgPoolerPredicator())).
		Owns(&corev1.Secret{}, builder.WithPredicates(secretPredicator())).
		Owns(&corev1.ConfigMap{}, builder.WithPredicates(configMapPredicator())).
		Named("postgresCluster").
		Complete(r)
}

// deletionTimestampChanged reports whether the deletion timestamp differs between two object versions.
func deletionTimestampChanged(oldObj, newObj metav1.Object) bool {
	return !equality.Semantic.DeepEqual(oldObj.GetDeletionTimestamp(), newObj.GetDeletionTimestamp())
}

// ownerReferencesChanged reports whether the owner references differ between two object versions.
func ownerReferencesChanged(oldObj, newObj metav1.Object) bool {
	return !equality.Semantic.DeepEqual(oldObj.GetOwnerReferences(), newObj.GetOwnerReferences())
}

// cnpgClusterPredicator filters CNPG Cluster events to only trigger reconciles on creation, deletion, or phase changes.
func cnpgClusterPredicator() predicate.Predicate {

	return predicate.Funcs{
		CreateFunc: func(event.CreateEvent) bool {
			return true
		},
		DeleteFunc: func(event.DeleteEvent) bool {
			return true
		},
		UpdateFunc: func(e event.UpdateEvent) bool {
			oldObj, oldTypeOK := e.ObjectOld.(*cnpgv1.Cluster)
			newObj, newTypeOK := e.ObjectNew.(*cnpgv1.Cluster)
			if !oldTypeOK || !newTypeOK {
				// Unexpected type: reconcile rather than risk missing an event.
				return true
			}
			return oldObj.Status.Phase != newObj.Status.Phase ||
				ownerReferencesChanged(oldObj, newObj)
		},
		GenericFunc: func(event.GenericEvent) bool {
			return false
		},
	}
}

// postgresClusterPredicator filters PostgresCluster events to trigger reconciles on creation, deletion, generation changes, deletion timestamp changes, or finalizer changes.
+func postgresClusterPredicator() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(event.CreateEvent) bool { + return true + }, + DeleteFunc: func(event.DeleteEvent) bool { + return true + }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldObj, oldTypeOK := e.ObjectOld.(*enterprisev4.PostgresCluster) + newObj, newTypeOK := e.ObjectNew.(*enterprisev4.PostgresCluster) + if !oldTypeOK || !newTypeOK { + return true + } + if oldObj.Generation != newObj.Generation { + return true + } + if deletionTimestampChanged(oldObj, newObj) { + return true + } + if postgresClusterFinalizerName != "" && (controllerutil.ContainsFinalizer(oldObj, postgresClusterFinalizerName) != controllerutil.ContainsFinalizer(newObj, postgresClusterFinalizerName)) { + return true + } + return false + }, + GenericFunc: func(event.GenericEvent) bool { + return false + }, + } +} + +// cnpgPoolerPredicator filters CNPG Pooler events to trigger reconciles on creation, deletion, or instance count changes. +func cnpgPoolerPredicator() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(event.CreateEvent) bool { + return true + }, + DeleteFunc: func(event.DeleteEvent) bool { + return true + }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldObj, oldTypeOK := e.ObjectOld.(*cnpgv1.Pooler) + newObj, newTypeOK := e.ObjectNew.(*cnpgv1.Pooler) + if !oldTypeOK || !newTypeOK { + return true + } + return oldObj.Status.Instances != newObj.Status.Instances + }, + GenericFunc: func(event.GenericEvent) bool { + return false + }, + } +} + +// secretPredicator filters Secret events to trigger reconciles on creation, deletion, or owner reference changes. 
+func secretPredicator() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(event.CreateEvent) bool { + return true + }, + DeleteFunc: func(event.DeleteEvent) bool { + return true + }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldObj, oldTypeOK := e.ObjectOld.(*corev1.Secret) + newObj, newTypeOK := e.ObjectNew.(*corev1.Secret) + if !oldTypeOK || !newTypeOK { + return true + } + return ownerReferencesChanged(oldObj, newObj) + }, + GenericFunc: func(event.GenericEvent) bool { + return false + }, + } +} + +// configMapPredicator filters ConfigMap events to trigger reconciles on creation, deletion, data/label/annotation changes, or owner reference changes. +func configMapPredicator() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(event.CreateEvent) bool { + return true + }, + DeleteFunc: func(event.DeleteEvent) bool { + return true + }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldObj, oldTypeOK := e.ObjectOld.(*corev1.ConfigMap) + newObj, newTypeOK := e.ObjectNew.(*corev1.ConfigMap) + if !oldTypeOK || !newTypeOK { + return true + } + return !equality.Semantic.DeepEqual(oldObj.Data, newObj.Data) || + !equality.Semantic.DeepEqual(oldObj.Labels, newObj.Labels) || + !equality.Semantic.DeepEqual(oldObj.Annotations, newObj.Annotations) || + ownerReferencesChanged(oldObj, newObj) + }, + GenericFunc: func(event.GenericEvent) bool { + return false + }, + } +} diff --git a/internal/controller/postgrescluster_controller_test.go b/internal/controller/postgrescluster_controller_test.go new file mode 100644 index 000000000..c0f3493d9 --- /dev/null +++ b/internal/controller/postgrescluster_controller_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + enterprisev4 "github.com/splunk/splunk-operator/api/v4" +) + +var _ = Describe("PostgresCluster Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + postgresCluster := &enterprisev4.PostgresCluster{} + + BeforeEach(func() { + By("creating the custom resource for the Kind PostgresCluster") + err := k8sClient.Get(ctx, typeNamespacedName, postgresCluster) + if err != nil && errors.IsNotFound(err) { + resource := &enterprisev4.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. 
+ resource := &enterprisev4.PostgresCluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance PostgresCluster") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &PostgresClusterReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. + }) + }) +}) diff --git a/internal/controller/postgrescluster_controller_unit_test.go b/internal/controller/postgrescluster_controller_unit_test.go new file mode 100644 index 000000000..ad4cd389c --- /dev/null +++ b/internal/controller/postgrescluster_controller_unit_test.go @@ -0,0 +1,1010 @@ +package controller + +import ( + "context" + "testing" + + cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + enterprisev4 "github.com/splunk/splunk-operator/api/v4" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + client "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestPoolerResourceName(t *testing.T) { + tests := []struct { + name string + clusterName string + poolerType string + expected string + }{ + { + name: "read-write pooler", + clusterName: "my-cluster", + poolerType: "rw", + expected: "my-cluster-pooler-rw", + }, + { + name: "cluster name with 
mixed case and alphanumeric suffix",
			clusterName: "My-Cluster-12x2f",
			poolerType:  "rw",
			expected:    "My-Cluster-12x2f-pooler-rw",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := poolerResourceName(tt.clusterName, tt.poolerType)
			assert.Equal(t, tt.expected, got)
		})
	}
}

// TestIsPoolerReady covers readiness: per the case names, a nil
// Spec.Instances defaults the desired count to 1, and readiness requires
// scheduled instances to meet the desired count.
func TestIsPoolerReady(t *testing.T) {
	r := &PostgresClusterReconciler{}
	tests := []struct {
		name     string
		pooler   *cnpgv1.Pooler
		expected bool
	}{
		{
			name: "nil instances defaults desired to 1, zero scheduled means not ready",
			pooler: &cnpgv1.Pooler{
				Status: cnpgv1.PoolerStatus{Instances: 0},
			},
			expected: false,
		},
		{
			name: "nil instances defaults desired to 1, one scheduled means ready",
			pooler: &cnpgv1.Pooler{
				Status: cnpgv1.PoolerStatus{Instances: 1},
			},
			expected: true,
		},
		{
			name: "scheduled meets desired",
			pooler: &cnpgv1.Pooler{
				Spec:   cnpgv1.PoolerSpec{Instances: ptr.To(int32(3))},
				Status: cnpgv1.PoolerStatus{Instances: 3},
			},
			expected: true,
		},
		{
			name: "scheduled below desired",
			pooler: &cnpgv1.Pooler{
				Spec:   cnpgv1.PoolerSpec{Instances: ptr.To(int32(3))},
				Status: cnpgv1.PoolerStatus{Instances: 2},
			},
			expected: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := r.isPoolerReady(tt.pooler)
			assert.Equal(t, tt.expected, got)
		})
	}
}

// TestNormalizeCNPGClusterSpec verifies which CNPG ClusterSpec fields survive
// normalization for drift comparison: basic fields are copied, only
// operator-defined parameters are kept (CNPG-injected ones dropped), empty
// PgHBA is excluded, and bootstrap database/owner are flattened.
func TestNormalizeCNPGClusterSpec(t *testing.T) {
	tests := []struct {
		name                    string
		spec                    cnpgv1.ClusterSpec
		customDefinedParameters map[string]string
		expected                normalizedCNPGClusterSpec
	}{
		{
			name: "basic fields are copied",
			spec: cnpgv1.ClusterSpec{
				ImageName:            "ghcr.io/cloudnative-pg/postgresql:18",
				Instances:            3,
				StorageConfiguration: cnpgv1.StorageConfiguration{Size: "10Gi"},
			},
			customDefinedParameters: nil,
			expected: normalizedCNPGClusterSpec{
				ImageName:   "ghcr.io/cloudnative-pg/postgresql:18",
				Instances:   3,
				StorageSize: "10Gi",
			},
		},
		{
			name: "CNPG-injected parameters are excluded from comparison",
			spec: cnpgv1.ClusterSpec{
				ImageName: "img:18",
				Instances: 1,
				PostgresConfiguration: cnpgv1.PostgresConfiguration{
					Parameters: map[string]string{
						"shared_buffers":  "256MB",
						"max_connections": "200",
						"cnpg_injected":   "should-not-appear",
					},
				},
			},
			customDefinedParameters: map[string]string{
				"shared_buffers":  "256MB",
				"max_connections": "200",
			},
			expected: normalizedCNPGClusterSpec{
				ImageName: "img:18",
				Instances: 1,
				CustomDefinedParameters: map[string]string{
					"shared_buffers":  "256MB",
					"max_connections": "200",
				},
			},
		},
		{
			name: "empty custom params does not populate CustomDefinedParameters",
			spec: cnpgv1.ClusterSpec{
				ImageName: "img:18",
				Instances: 1,
				PostgresConfiguration: cnpgv1.PostgresConfiguration{
					Parameters: map[string]string{"cnpg_injected": "val"},
				},
			},
			customDefinedParameters: map[string]string{},
			expected: normalizedCNPGClusterSpec{
				ImageName: "img:18",
				Instances: 1,
			},
		},
		{
			name: "PgHBA included when non-empty",
			spec: cnpgv1.ClusterSpec{
				ImageName: "img:18",
				Instances: 1,
				PostgresConfiguration: cnpgv1.PostgresConfiguration{
					PgHBA: []string{"hostssl all all 0.0.0.0/0 scram-sha-256"},
				},
			},
			expected: normalizedCNPGClusterSpec{
				ImageName: "img:18",
				Instances: 1,
				PgHBA:     []string{"hostssl all all 0.0.0.0/0 scram-sha-256"},
			},
		},
		{
			name: "empty PgHBA is excluded",
			spec: cnpgv1.ClusterSpec{
				ImageName: "img:18",
				Instances: 1,
				PostgresConfiguration: cnpgv1.PostgresConfiguration{
					PgHBA: []string{},
				},
			},
			expected: normalizedCNPGClusterSpec{
				ImageName: "img:18",
				Instances: 1,
			},
		},
		{
			name: "bootstrap populates database and owner",
			spec: cnpgv1.ClusterSpec{
				ImageName: "img:18",
				Instances: 1,
				Bootstrap: &cnpgv1.BootstrapConfiguration{
					InitDB: &cnpgv1.BootstrapInitDB{
						Database: "mydb",
						Owner:    "admin",
					},
				},
			},
			expected: normalizedCNPGClusterSpec{
				ImageName:       "img:18",
				Instances:       1,
				DefaultDatabase: "mydb",
				Owner:           "admin",
			},
		},
		{
			name: "nil bootstrap leaves database and owner empty",
			spec: cnpgv1.ClusterSpec{
				ImageName: "img:18",
				Instances: 1,
			},
			expected: normalizedCNPGClusterSpec{
				ImageName: "img:18",
				Instances: 1,
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := normalizeCNPGClusterSpec(tt.spec, tt.customDefinedParameters)
			assert.Equal(t, tt.expected, got)
		})
	}
}

// TestGetMergedConfig verifies class/cluster merging precedence: cluster spec
// overrides class defaults, class defaults fill nil cluster fields, missing
// required fields error out, CNPG provisioner config comes from the class,
// and nil maps/slices are initialized to non-nil zero values.
func TestGetMergedConfig(t *testing.T) {
	r := &PostgresClusterReconciler{}

	classInstances := int32(1)
	classVersion := "17"
	classStorage := resource.MustParse("50Gi")
	baseClass := &enterprisev4.PostgresClusterClass{
		ObjectMeta: metav1.ObjectMeta{Name: "standard"},
		Spec: enterprisev4.PostgresClusterClassSpec{
			Config: enterprisev4.PosgresClusterClassConfig{
				Instances:        &classInstances,
				PostgresVersion:  &classVersion,
				Storage:          &classStorage,
				Resources:        &corev1.ResourceRequirements{},
				PostgreSQLConfig: map[string]string{"shared_buffers": "128MB"},
				PgHBA:            []string{"host all all 0.0.0.0/0 md5"},
			},
			CNPG: &enterprisev4.CNPGConfig{PrimaryUpdateMethod: "switchover"},
		},
	}

	t.Run("cluster spec overrides class defaults", func(t *testing.T) {
		overrideInstances := int32(5)
		overrideVersion := "18"
		overrideStorage := resource.MustParse("100Gi")
		cluster := &enterprisev4.PostgresCluster{
			Spec: enterprisev4.PostgresClusterSpec{
				Instances:        &overrideInstances,
				PostgresVersion:  &overrideVersion,
				Storage:          &overrideStorage,
				PostgreSQLConfig: map[string]string{"max_connections": "200"},
				PgHBA:            []string{"hostssl all all 0.0.0.0/0 scram-sha-256"},
			},
		}

		cfg, err := r.getMergedConfig(baseClass, cluster)

		require.NoError(t, err)
		assert.Equal(t, int32(5), *cfg.ClusterSpec.Instances)
		assert.Equal(t, "18", *cfg.ClusterSpec.PostgresVersion)
		assert.Equal(t, "100Gi", cfg.ClusterSpec.Storage.String())
		assert.Equal(t, "200", cfg.ClusterSpec.PostgreSQLConfig["max_connections"])
		assert.Equal(t, "hostssl all all 0.0.0.0/0 scram-sha-256", cfg.ClusterSpec.PgHBA[0])
	})

	t.Run("class defaults fill in nil cluster fields", func(t *testing.T) {
		cluster := &enterprisev4.PostgresCluster{
			Spec: enterprisev4.PostgresClusterSpec{},
		}

		cfg, err := r.getMergedConfig(baseClass, cluster)

		require.NoError(t, err)
		assert.Equal(t, int32(1), *cfg.ClusterSpec.Instances)
		assert.Equal(t, "17", *cfg.ClusterSpec.PostgresVersion)
		assert.Equal(t, "50Gi", cfg.ClusterSpec.Storage.String())
		assert.Equal(t, "128MB", cfg.ClusterSpec.PostgreSQLConfig["shared_buffers"])
	})

	t.Run("returns error when required fields missing from both", func(t *testing.T) {
		emptyClass := &enterprisev4.PostgresClusterClass{
			ObjectMeta: metav1.ObjectMeta{Name: "empty"},
			Spec:       enterprisev4.PostgresClusterClassSpec{},
		}
		cluster := &enterprisev4.PostgresCluster{
			Spec: enterprisev4.PostgresClusterSpec{},
		}

		_, err := r.getMergedConfig(emptyClass, cluster)

		require.Error(t, err)
	})

	t.Run("CNPG config comes from class not cluster", func(t *testing.T) {
		cluster := &enterprisev4.PostgresCluster{
			Spec: enterprisev4.PostgresClusterSpec{},
		}

		cfg, err := r.getMergedConfig(baseClass, cluster)

		require.NoError(t, err)
		require.NotNil(t, cfg.ProvisionerConfig)
		assert.Equal(t, "switchover", cfg.ProvisionerConfig.PrimaryUpdateMethod)
	})

	t.Run("nil maps and slices initialized to safe zero values", func(t *testing.T) {
		classWithNoMaps := &enterprisev4.PostgresClusterClass{
			ObjectMeta: metav1.ObjectMeta{Name: "minimal"},
			Spec: enterprisev4.PostgresClusterClassSpec{
				Config: enterprisev4.PosgresClusterClassConfig{
					Instances:       &classInstances,
					PostgresVersion: &classVersion,
					Storage:         &classStorage,
				},
			},
		}
		cluster := &enterprisev4.PostgresCluster{
			Spec: enterprisev4.PostgresClusterSpec{},
		}

		cfg, err := r.getMergedConfig(classWithNoMaps, cluster)

		require.NoError(t, err)
		assert.NotNil(t, cfg.ClusterSpec.PostgreSQLConfig)
		assert.NotNil(t, cfg.ClusterSpec.PgHBA)
		assert.NotNil(t, cfg.ClusterSpec.Resources)
	})
}

// TestBuildCNPGClusterSpec checks the CNPG ClusterSpec produced from an
// effective config: image tag derived from the Postgres version, superuser
// secret wiring, bootstrap defaults ("postgres"/"postgres"), storage size,
// and pass-through of parameters and PgHBA rules.
func TestBuildCNPGClusterSpec(t *testing.T) {
	r := &PostgresClusterReconciler{}

	version := "18"
	instances := int32(3)
	storage := resource.MustParse("50Gi")
	cfg := &EffectiveClusterConfig{
		ClusterSpec: &enterprisev4.PostgresClusterSpec{
			PostgresVersion: &version,
			Instances:       &instances,
			Storage:         &storage,
			PostgreSQLConfig: map[string]string{
				"shared_buffers":  "256MB",
				"max_connections": "200",
			},
			PgHBA: []string{
				"hostssl all all 0.0.0.0/0 scram-sha-256",
				"host replication all 10.0.0.0/8 md5",
			},
			Resources: &corev1.ResourceRequirements{},
		},
	}

	spec := r.buildCNPGClusterSpec(cfg, "my-secret")

	assert.Equal(t, "ghcr.io/cloudnative-pg/postgresql:18", spec.ImageName)
	assert.Equal(t, 3, spec.Instances)
	require.NotNil(t, spec.SuperuserSecret)
	assert.Equal(t, "my-secret", spec.SuperuserSecret.Name)
	assert.Equal(t, "my-secret", spec.Bootstrap.InitDB.Secret.Name)
	require.NotNil(t, spec.EnableSuperuserAccess)
	assert.True(t, *spec.EnableSuperuserAccess)
	assert.Equal(t, "postgres", spec.Bootstrap.InitDB.Database)
	assert.Equal(t, "postgres", spec.Bootstrap.InitDB.Owner)
	assert.Equal(t, "50Gi", spec.StorageConfiguration.Size)
	assert.Equal(t, "256MB", spec.PostgresConfiguration.Parameters["shared_buffers"])
	assert.Equal(t, "200", spec.PostgresConfiguration.Parameters["max_connections"])
	require.Len(t, spec.PostgresConfiguration.PgHBA, 2)
	assert.Equal(t, "hostssl all all 0.0.0.0/0 scram-sha-256", spec.PostgresConfiguration.PgHBA[0])
	assert.Equal(t, "host replication all 10.0.0.0/8 md5", spec.PostgresConfiguration.PgHBA[1])
}

// TestBuildCNPGPooler checks Pooler construction for both rw and ro types:
// name/namespace, cluster reference, instance count, pool mode, PgBouncer
// parameters, and the owner reference pointing at the PostgresCluster.
func TestBuildCNPGPooler(t *testing.T) {
	scheme := runtime.NewScheme()
	enterprisev4.AddToScheme(scheme)
	cnpgv1.AddToScheme(scheme)
	r := &PostgresClusterReconciler{Scheme: scheme}

	poolerInstances := int32(3)
	poolerMode := enterprisev4.ConnectionPoolerModeTransaction
	postgresCluster := &enterprisev4.PostgresCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-cluster",
			Namespace: "db-ns",
			UID:       "test-uid",
		},
	}
	cnpgCluster := &cnpgv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-cluster",
		},
	}
	cfg := &EffectiveClusterConfig{
		ProvisionerConfig: &enterprisev4.CNPGConfig{
			ConnectionPooler: &enterprisev4.ConnectionPoolerConfig{
				Instances: &poolerInstances,
				Mode:      &poolerMode,
				Config:    map[string]string{"default_pool_size": "25"},
			},
		},
	}

	t.Run("rw pooler", func(t *testing.T) {
		pooler := r.buildCNPGPooler(postgresCluster, cfg, cnpgCluster, "rw")

		assert.Equal(t, "my-cluster-pooler-rw", pooler.Name)
		assert.Equal(t, "db-ns", pooler.Namespace)
		assert.Equal(t, "my-cluster", pooler.Spec.Cluster.Name)
		require.NotNil(t, pooler.Spec.Instances)
		assert.Equal(t, int32(3), *pooler.Spec.Instances)
		assert.Equal(t, cnpgv1.PoolerType("rw"), pooler.Spec.Type)
		assert.Equal(t, cnpgv1.PgBouncerPoolMode("transaction"), pooler.Spec.PgBouncer.PoolMode)
		assert.Equal(t, "25", pooler.Spec.PgBouncer.Parameters["default_pool_size"])
		require.Len(t, pooler.OwnerReferences, 1)
		assert.Equal(t, "test-uid", string(pooler.OwnerReferences[0].UID))
	})

	t.Run("ro pooler", func(t *testing.T) {
		pooler := r.buildCNPGPooler(postgresCluster, cfg, cnpgCluster, "ro")

		assert.Equal(t, "my-cluster-pooler-ro", pooler.Name)
		assert.Equal(t, cnpgv1.PoolerType("ro"), pooler.Spec.Type)
	})
}

// TestBuildCNPGCluster checks metadata, owner reference, and instance count
// of the CNPG Cluster built from a PostgresCluster plus effective config.
func TestBuildCNPGCluster(t *testing.T) {
	scheme := runtime.NewScheme()
	enterprisev4.AddToScheme(scheme)
	cnpgv1.AddToScheme(scheme)
	r := &PostgresClusterReconciler{Scheme: scheme}

	instances := int32(3)
	version := "18"
	storage := resource.MustParse("50Gi")
	postgresCluster := &enterprisev4.PostgresCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-cluster",
			Namespace: "db-ns",
			UID:       "pg-uid",
		},
	}
	cfg := &EffectiveClusterConfig{
		ClusterSpec: &enterprisev4.PostgresClusterSpec{
			Instances:        &instances,
			PostgresVersion:  &version,
			Storage:          &storage,
			PostgreSQLConfig: map[string]string{},
			PgHBA:            []string{},
			Resources:        &corev1.ResourceRequirements{},
		},
	}

	cluster := r.buildCNPGCluster(postgresCluster, cfg, "my-secret")

	assert.Equal(t, "my-cluster", cluster.Name)
	assert.Equal(t, "db-ns", cluster.Namespace)
	require.Len(t, cluster.OwnerReferences, 1)
	assert.Equal(t, "pg-uid", string(cluster.OwnerReferences[0].UID))
	assert.Equal(t, 3, cluster.Spec.Instances)
}

// TestClusterSecretExists exercises the existence check against a fake
// client: present secret -> true, missing secret -> false with nil error.
func TestClusterSecretExists(t *testing.T) {
	scheme := runtime.NewScheme()
	corev1.AddToScheme(scheme)

	tests := []struct {
		name           string
		objects        []client.Object
		secretName     string
		expectedExists bool
	}{
		{
			name: "returns true when secret exists",
			objects: []client.Object{
				&corev1.Secret{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "my-secret",
						Namespace: "default",
					},
				},
			},
			secretName:     "my-secret",
			expectedExists: true,
		},
		{
			name: "returns false when secret not found",
			objects: []client.Object{
				&corev1.Secret{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "other-secret",
						Namespace: "default",
					},
				},
			},
			secretName:     "missing-secret",
			expectedExists: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objects...).Build()
			r := &PostgresClusterReconciler{Client: c, Scheme: scheme}
			secret := &corev1.Secret{}

			exists, err := r.clusterSecretExists(context.Background(), "default", tt.secretName, secret)

			require.NoError(t, err)
			assert.Equal(t, tt.expectedExists, exists)
		})
	}
}

// TestRemoveOwnerRef covers the three removal cases: ref absent (no change),
// only our ref present (removed), and our ref removed while foreign refs
// are preserved.
func TestRemoveOwnerRef(t *testing.T) {
	scheme := runtime.NewScheme()
	corev1.AddToScheme(scheme)
	enterprisev4.AddToScheme(scheme)
	r := &PostgresClusterReconciler{Scheme: scheme}

	owner := &enterprisev4.PostgresCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-cluster",
			Namespace: "default",
			UID:       "owner-uid",
		},
	}

	otherOwnerRef := metav1.OwnerReference{
		APIVersion: "apps/v1",
		Kind:       "Deployment",
		Name:       "other-owner",
		UID:        "other-uid",
	}
	ourOwnerRef := metav1.OwnerReference{
		APIVersion: "enterprise.splunk.com/v4",
		Kind:       "PostgresCluster",
		Name:       "my-cluster",
		UID:        "owner-uid",
	}

	tests := []struct {
		name            string
		ownerRefs       []metav1.OwnerReference
		expectedRemoved bool
		expectedRefsLen int
	}{
		{
			name:            "returns false when owner ref not present",
			ownerRefs:       nil,
			expectedRemoved: false,
			expectedRefsLen: 0,
		},
		{
			name:            "removes owner ref and returns true",
			ownerRefs:       []metav1.OwnerReference{ourOwnerRef},
			expectedRemoved: true,
			expectedRefsLen: 0,
		},
		{
			name:            "removes only our owner ref and keeps others",
			ownerRefs:       []metav1.OwnerReference{otherOwnerRef, ourOwnerRef},
			expectedRemoved: true,
			expectedRefsLen: 1,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			secret := &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name:            "my-secret",
					Namespace:       "default",
					OwnerReferences: tt.ownerRefs,
				},
			}

			removed, err := r.removeOwnerRef(owner, secret, "Secret")

			require.NoError(t, err)
			assert.Equal(t, tt.expectedRemoved, removed)
			assert.Len(t, secret.GetOwnerReferences(), tt.expectedRefsLen)
		})
	}
}

// TestPatchObject verifies a successful merge patch round-trip and that a
// NotFound on patch is treated as success.
func TestPatchObject(t *testing.T) {
	scheme := runtime.NewScheme()
	corev1.AddToScheme(scheme)

	t.Run("patches object successfully", func(t *testing.T) {
		existing := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "my-secret",
				Namespace: "default",
			},
			Data: map[string][]byte{"key": []byte("old-value")},
		}
		c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(existing).Build()
		r := &PostgresClusterReconciler{Client: c, Scheme: scheme}
		original := existing.DeepCopy()
		existing.Data["key"] = []byte("new-value")

		err :=
r.patchObject(context.Background(), original, existing, "Secret")

		require.NoError(t, err)
		patched := &corev1.Secret{}
		require.NoError(t, c.Get(context.Background(), client.ObjectKeyFromObject(existing), patched))
		assert.Equal(t, "new-value", string(patched.Data["key"]))
	})

	t.Run("returns nil when object not found", func(t *testing.T) {
		c := fake.NewClientBuilder().WithScheme(scheme).Build()
		r := &PostgresClusterReconciler{Client: c, Scheme: scheme}
		original := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "deleted-secret",
				Namespace: "default",
			},
		}
		modified := original.DeepCopy()
		modified.Data = map[string][]byte{"key": []byte("value")}

		err := r.patchObject(context.Background(), original, modified, "Secret")

		assert.NoError(t, err)
	})
}

// TestDeleteCNPGCluster checks deletion succeeds for an existing cluster and
// is a nil-error no-op when the cluster is already gone.
func TestDeleteCNPGCluster(t *testing.T) {
	scheme := runtime.NewScheme()
	cnpgv1.AddToScheme(scheme)

	tests := []struct {
		name    string
		objects []client.Object
		cluster *cnpgv1.Cluster
	}{
		{
			name: "deletes existing cluster",
			objects: []client.Object{
				&cnpgv1.Cluster{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "my-cluster",
						Namespace: "default",
					},
				},
			},
			cluster: &cnpgv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "my-cluster",
					Namespace: "default",
				},
			},
		},
		{
			name: "already deleted cluster returns nil",
			cluster: &cnpgv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "gone-cluster",
					Namespace: "default",
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objects...).Build()
			r := &PostgresClusterReconciler{Client: c, Scheme: scheme}

			err := r.deleteCNPGCluster(context.Background(), tt.cluster)

			require.NoError(t, err)
		})
	}
}

// TestPoolerExists checks lookup of the derived pooler name for a given
// cluster and pooler type against a fake client.
func TestPoolerExists(t *testing.T) {
	scheme := runtime.NewScheme()
	cnpgv1.AddToScheme(scheme)
	enterprisev4.AddToScheme(scheme)

	cluster := &enterprisev4.PostgresCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-cluster",
			Namespace: "default",
		},
	}

	tests := []struct {
		name     string
		objects  []client.Object
		expected bool
	}{
		{
			name: "returns true when pooler exists",
			objects: []client.Object{
				&cnpgv1.Pooler{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "my-cluster-pooler-rw",
						Namespace: "default",
					},
				},
			},
			expected: true,
		},
		{
			name: "returns false when given pooler is not found",
			objects: []client.Object{
				&cnpgv1.Pooler{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "my-cluster-pooler-ro",
						Namespace: "default",
					},
				},
			},
			expected: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objects...).Build()
			r := &PostgresClusterReconciler{Client: c, Scheme: scheme}

			got := r.poolerExists(context.Background(), cluster, "rw")

			assert.Equal(t, tt.expected, got)
		})
	}
}

// TestDeleteConnectionPoolers checks that both rw and ro poolers are deleted
// when present, and that deletion is a no-op when neither exists.
func TestDeleteConnectionPoolers(t *testing.T) {
	scheme := runtime.NewScheme()
	cnpgv1.AddToScheme(scheme)
	enterprisev4.AddToScheme(scheme)

	cluster := &enterprisev4.PostgresCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-cluster",
			Namespace: "default",
		},
	}

	rwPooler := &cnpgv1.Pooler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-cluster-pooler-rw",
			Namespace: "default",
		},
	}
	roPooler := &cnpgv1.Pooler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-cluster-pooler-ro",
			Namespace: "default",
		},
	}

	t.Run("deletes both poolers when they exist", func(t *testing.T) {
		c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(rwPooler.DeepCopy(), roPooler.DeepCopy()).Build()
		r := &PostgresClusterReconciler{Client: c, Scheme: scheme}

		err := r.deleteConnectionPoolers(context.Background(), cluster)

		require.NoError(t, err)
		assert.True(t, apierrors.IsNotFound(c.Get(context.Background(), client.ObjectKey{Name: "my-cluster-pooler-rw", Namespace: "default"}, &cnpgv1.Pooler{})))
		assert.True(t, apierrors.IsNotFound(c.Get(context.Background(), client.ObjectKey{Name: "my-cluster-pooler-ro", Namespace: "default"}, &cnpgv1.Pooler{})))
	})

	t.Run("no-op when no poolers exist", func(t *testing.T) {
		c := fake.NewClientBuilder().WithScheme(scheme).Build()
		r := &PostgresClusterReconciler{Client: c, Scheme: scheme}

		err := r.deleteConnectionPoolers(context.Background(), cluster)

		require.NoError(t, err)
	})
}

// TestGenerateSecret checks that a credentials Secret is created with the
// expected metadata, type, and owner reference, and that an existing Secret
// is left alone.
func TestGenerateSecret(t *testing.T) {
	scheme := runtime.NewScheme()
	corev1.AddToScheme(scheme)
	enterprisev4.AddToScheme(scheme)

	t.Run("creates secret with credentials and owner reference", func(t *testing.T) {
		c := fake.NewClientBuilder().WithScheme(scheme).Build()
		r := &PostgresClusterReconciler{Client: c, Scheme: scheme}
		cluster := &enterprisev4.PostgresCluster{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "my-cluster",
				Namespace: "default",
				UID:       "cluster-uid",
			},
		}

		err := r.generateSecret(context.Background(), cluster, "my-secret")

		require.NoError(t, err)
		secret := &corev1.Secret{}
		require.NoError(t, c.Get(context.Background(), client.ObjectKey{Name: "my-secret", Namespace: "default"}, secret))
		assert.Equal(t, "my-secret", secret.Name)
		assert.Equal(t, "default", secret.Namespace)
		assert.Equal(t, corev1.SecretTypeOpaque, secret.Type)
		require.Len(t, secret.OwnerReferences, 1)
		assert.Equal(t, "cluster-uid", string(secret.OwnerReferences[0].UID))
	})

	t.Run("no-op when secret already exists", func(t *testing.T) {
		existing := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "my-secret",
				Namespace: "default",
			},
			StringData: map[string]string{"username": "existing-user"},
		}
		c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(existing).Build()
		r := &PostgresClusterReconciler{Client: c, Scheme: scheme}
		cluster := &enterprisev4.PostgresCluster{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "my-cluster",
				Namespace: "default",
				UID:       "cluster-uid",
			},
		}

		err := r.generateSecret(context.Background(), cluster, "my-secret")

		require.NoError(t, err)
	})
}

// TestArePoolersReady checks the combined readiness of the rw and ro poolers:
// true only when both meet their desired instance counts.
func TestArePoolersReady(t *testing.T) {
	r := &PostgresClusterReconciler{}

	readyPooler := func(instances int32) *cnpgv1.Pooler {
		return &cnpgv1.Pooler{
			Spec:   cnpgv1.PoolerSpec{Instances: ptr.To(instances)},
			Status: cnpgv1.PoolerStatus{Instances: instances},
		}
	}
	notReadyPooler := func(desired, actual int32) *cnpgv1.Pooler {
		return &cnpgv1.Pooler{
			Spec:   cnpgv1.PoolerSpec{Instances: ptr.To(desired)},
			Status: cnpgv1.PoolerStatus{Instances: actual},
		}
	}

	tests := []struct {
		name     string
		rw       *cnpgv1.Pooler
		ro       *cnpgv1.Pooler
		expected bool
	}{
		{
			name:     "returns true when both poolers are ready",
			rw:       readyPooler(2),
			ro:       readyPooler(2),
			expected: true,
		},
		{
			name:     "returns false when rw pooler not ready",
			rw:       notReadyPooler(2, 0),
			ro:       readyPooler(2),
			expected: false,
		},
		{
			name:     "returns false when ro pooler not ready",
			rw:       readyPooler(2),
			ro:       notReadyPooler(2, 1),
			expected: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := r.arePoolersReady(tt.rw, tt.ro)
			assert.Equal(t, tt.expected, got)
		})
	}
}

// TestCreateConnectionPooler checks that a pooler is created from the
// effective config when absent, and that an existing pooler is not modified.
func TestCreateConnectionPooler(t *testing.T) {
	scheme := runtime.NewScheme()
	corev1.AddToScheme(scheme)
	cnpgv1.AddToScheme(scheme)
	enterprisev4.AddToScheme(scheme)

	poolerInstances := int32(2)
	poolerMode := enterprisev4.ConnectionPoolerModeTransaction
	cluster := &enterprisev4.PostgresCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-cluster",
			Namespace: "default",
			UID:       "cluster-uid",
		},
	}
	cnpg := &cnpgv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-cluster",
			Namespace: "default",
		},
	}
	cfg := &EffectiveClusterConfig{
		ProvisionerConfig: &enterprisev4.CNPGConfig{
			ConnectionPooler: &enterprisev4.ConnectionPoolerConfig{
				Instances: &poolerInstances,
				Mode:      &poolerMode,
				Config:    map[string]string{"default_pool_size": "25"},
			},
		},
	}

	tests := []struct {
		name            string
		objects         []client.Object
		expectInstances int32
	}{
		{
			name:            "creates pooler when it does not exist",
			objects:         nil,
			expectInstances: 2,
		},
		{
			name: "no-op when pooler already exists",
			objects: []client.Object{
				&cnpgv1.Pooler{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "my-cluster-pooler-rw",
						Namespace: "default",
					},
					Spec: cnpgv1.PoolerSpec{Instances: ptr.To(int32(1))},
				},
			},
			expectInstances: 1,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objects...).Build()
			r := &PostgresClusterReconciler{Client: c, Scheme: scheme}

			err := r.createConnectionPooler(context.Background(), cluster.DeepCopy(), cfg, cnpg, "rw")

			require.NoError(t, err)
			fetched := &cnpgv1.Pooler{}
			require.NoError(t, c.Get(context.Background(), client.ObjectKey{Name: "my-cluster-pooler-rw", Namespace: "default"}, fetched))
			require.NotNil(t, fetched.Spec.Instances)
			assert.Equal(t, tt.expectInstances, *fetched.Spec.Instances)
		})
	}
}

// TestGenerateConfigMap checks the generated endpoint ConfigMap's name,
// namespace, and per-endpoint data entries.
func TestGenerateConfigMap(t *testing.T) {
	scheme := runtime.NewScheme()
	corev1.AddToScheme(scheme)
	enterprisev4.AddToScheme(scheme)

	cluster := &enterprisev4.PostgresCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "my-cluster",
			Namespace: "default",
			UID:       "cluster-uid",
		},
	}

	t.Run("base endpoints without poolers", func(t *testing.T) {
		r := &PostgresClusterReconciler{Scheme: scheme}
		cm, err := r.generateConfigMap(cluster.DeepCopy(), "my-secret", false)

		require.NoError(t, err)
		assert.Equal(t, "my-cluster-configmap", cm.Name)
		assert.Equal(t, "default", cm.Namespace)
		assert.Equal(t, "my-cluster-rw.default", cm.Data["CLUSTER_RW_ENDPOINT"])
		assert.Equal(t, "my-cluster-ro.default", cm.Data["CLUSTER_RO_ENDPOINT"])
		assert.Equal(t, "my-cluster-r.default", cm.Data["CLUSTER_R_ENDPOINT"])
		assert.Equal(t,
"5432", cm.Data["DEFAULT_CLUSTER_PORT"]) + assert.Equal(t, "postgres", cm.Data["SUPER_USER_NAME"]) + assert.Equal(t, "my-secret", cm.Data["SUPER_USER_SECRET_REF"]) + assert.NotContains(t, cm.Data, "CLUSTER_POOLER_RW_ENDPOINT") + require.Len(t, cm.OwnerReferences, 1) + assert.Equal(t, "cluster-uid", string(cm.OwnerReferences[0].UID)) + }) + + t.Run("includes pooler endpoints when poolerEnabled is true", func(t *testing.T) { + r := &PostgresClusterReconciler{Scheme: scheme} + cm, err := r.generateConfigMap(cluster.DeepCopy(), "my-secret", true) + + require.NoError(t, err) + assert.Equal(t, "my-cluster-pooler-rw.default", cm.Data["CLUSTER_POOLER_RW_ENDPOINT"]) + assert.Equal(t, "my-cluster-pooler-ro.default", cm.Data["CLUSTER_POOLER_RO_ENDPOINT"]) + }) + + t.Run("uses existing configmap name from status", func(t *testing.T) { + r := &PostgresClusterReconciler{Scheme: scheme} + pg := cluster.DeepCopy() + pg.Status.Resources = &enterprisev4.PostgresClusterResources{ + ConfigMapRef: &corev1.LocalObjectReference{Name: "custom-configmap"}, + } + + cm, err := r.generateConfigMap(pg, "my-secret", false) + + require.NoError(t, err) + assert.Equal(t, "custom-configmap", cm.Name) + }) +} diff --git a/internal/controller/postgresdatabase_controller.go b/internal/controller/postgresdatabase_controller.go new file mode 100644 index 000000000..814d9e75c --- /dev/null +++ b/internal/controller/postgresdatabase_controller.go @@ -0,0 +1,1440 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controller
+
+import (
+	"context"
+	"encoding/json"
+	stderrors "errors"
+	"fmt"
+	"reflect"
+	"slices"
+	"strings"
+	"time"
+
+	cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/jackc/pgx/v5"
+	"github.com/sethvargo/go-password/password"
+	enterprisev4 "github.com/splunk/splunk-operator/api/v4"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+)
+
+const (
+	// Suffixes identifying the two per-database roles in generated secret names
+	// (see roleSecretName usage below): "admin" owns the database, "rw" is the
+	// application read-write role.
+	secretRoleAdmin = "admin"
+	secretRoleRW    = "rw"
+
+	// Password generation — no symbols for PostgreSQL connection string compatibility.
+	passwordLength  = 32
+	passwordDigits  = 8
+	passwordSymbols = 0
+
+	// Label keys used on managed secrets.
+	labelManagedBy  = "app.kubernetes.io/managed-by"
+	labelCNPGReload = "cnpg.io/reload"
+
+	// postgresPort is the standard PostgreSQL port used in all connection strings.
+	postgresPort = "5432"
+
+	// fieldManagerPrefix is the SSA field manager prefix for PostgresDatabase controllers.
+	fieldManagerPrefix = "postgresdatabase-"
+)
+
+// fieldManagerName returns the SSA field manager name for a given PostgresDatabase.
+func fieldManagerName(postgresDBName string) string {
+	return fieldManagerPrefix + postgresDBName
+}
+
+// PostgresDatabaseReconciler reconciles a PostgresDatabase object
+type PostgresDatabaseReconciler struct {
+	client.Client
+	Scheme *runtime.Scheme
+}
+
+//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresdatabases,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresdatabases/status,verbs=get;update;patch
+//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresdatabases/finalizers,verbs=update
+//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresclusters,verbs=get;list;watch
+//+kubebuilder:rbac:groups=postgresql.cnpg.io,resources=clusters,verbs=get;list;watch;patch
+//+kubebuilder:rbac:groups=postgresql.cnpg.io,resources=databases,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;delete
+//+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;delete
+
+// Reconcile drives a PostgresDatabase through its phases in order:
+// ClusterValidation → RoleConflictCheck → CredentialProvisioning →
+// ConnectionMetadata → RoleProvisioning → DatabaseProvisioning → RWRolePrivileges.
+// Each phase either advances or requeues; status conditions record progress.
+func (r *PostgresDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	logger := log.FromContext(ctx)
+	logger.Info("Reconciling PostgresDatabase", "name", req.Name, "namespace", req.Namespace)
+
+	postgresDB := &enterprisev4.PostgresDatabase{}
+	if err := r.Get(ctx, req.NamespacedName, postgresDB); err != nil {
+		if errors.IsNotFound(err) {
+			logger.Info("PostgresDatabase resource not found, ignoring")
+			return ctrl.Result{}, nil
+		}
+		logger.Error(err, "Failed to get PostgresDatabase", "name", req.Name)
+		return ctrl.Result{}, err
+	}
+	logger.Info("PostgresDatabase CR Fetched successfully", "generation", postgresDB.Generation)
+
+	// Closure captures postgresDB so call sites don't repeat it on every status update.
+	updateStatus := func(conditionType conditionTypes, conditionStatus metav1.ConditionStatus, reason conditionReasons, message string, phase reconcileDBPhases) error {
+		return r.updateStatus(ctx, postgresDB, conditionType, conditionStatus, reason, message, phase)
+	}
+
+	// Handle finalizer: cleanup on deletion, register on creation
+	if postgresDB.GetDeletionTimestamp() != nil {
+		if err := r.handleDeletion(ctx, postgresDB); err != nil {
+			logger.Error(err, "Cleanup failed for PostgresDatabase")
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{}, nil
+	}
+	if !controllerutil.ContainsFinalizer(postgresDB, postgresDatabaseFinalizerName) {
+		controllerutil.AddFinalizer(postgresDB, postgresDatabaseFinalizerName)
+		if err := r.Update(ctx, postgresDB); err != nil {
+			logger.Error(err, "Failed to add finalizer to PostgresDatabase")
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{}, nil
+	}
+
+	// ObservedGeneration is only written when all phases complete successfully,
+	// so equality means nothing changed and there is no pending work.
+	if postgresDB.Status.ObservedGeneration == postgresDB.Generation {
+		logger.Info("Spec unchanged and all phases complete, skipping")
+		return ctrl.Result{}, nil
+	}
+	logger.Info("Changes to resource detected, reconciling...")
+
+	// Phase: ClusterValidation
+	var cluster *enterprisev4.PostgresCluster
+	var clusterStatus clusterReadyStatus
+	var err error
+
+	cluster, clusterStatus, err = r.ensureClusterReady(ctx, postgresDB)
+	if err != nil {
+		if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterInfoFetchFailed, "Can't reach Cluster CR due to transient errors", pendingDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		return ctrl.Result{}, err
+	}
+	logger.Info("Cluster validation done", "clusterName", postgresDB.Spec.ClusterRef.Name, "status", clusterStatus)
+
+	switch clusterStatus {
+	case ClusterNotFound:
+		if err := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterNotFound, "Cluster CR not found", pendingDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: clusterNotFoundRetryDelay}, nil
+
+	case ClusterNotReady, ClusterNoProvisionerRef:
+		if err := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterProvisioning, "Cluster is not in ready state yet", pendingDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: retryDelay}, nil
+
+	case ClusterReady:
+		if err := updateStatus(clusterReady, metav1.ConditionTrue, reasonClusterAvailable, "Cluster is operational", provisioningDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+	}
+
+	// Phase: RoleConflictCheck — before creating any resources, verify no other
+	// field manager already owns the same roles via SSA.
+	roleConflicts := getRoleConflicts(postgresDB, cluster)
+	if len(roleConflicts) > 0 {
+		conflictMsg := fmt.Sprintf("Role conflict: %s. "+
+			"If you deleted a previous PostgresDatabase, recreate it with the original name to re-adopt the orphaned resources.",
+			strings.Join(roleConflicts, ", "))
+		logger.Error(nil, conflictMsg)
+		if statusErr := updateStatus(rolesReady, metav1.ConditionFalse, reasonRoleConflict, conflictMsg, failedDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		// NOTE(review): returns with neither error nor requeue, so recovery from a
+		// conflict requires a spec change (new generation) or an external event —
+		// confirm this terminal behavior is intended.
+		return ctrl.Result{}, nil
+	}
+
+	// We need the CNPG Cluster directly because PostgresCluster status does not yet
+	// surface managed role reconciliation state — tracked as a future abstraction improvement.
+	cnpgCluster := &cnpgv1.Cluster{}
+	if err := r.Get(ctx, types.NamespacedName{
+		Name:      cluster.Status.ProvisionerRef.Name,
+		Namespace: cluster.Status.ProvisionerRef.Namespace,
+	}, cnpgCluster); err != nil {
+		logger.Error(err, "Failed to fetch CNPG Cluster")
+		return ctrl.Result{}, err
+	}
+
+	// Phase: CredentialProvisioning — secrets must exist before roles are patched,
+	// CNPG rejects a PasswordSecretRef pointing at a missing secret.
+	if err := r.reconcileUserSecrets(ctx, postgresDB); err != nil {
+		if statusErr := updateStatus(secretsReady, metav1.ConditionFalse, reasonSecretsCreationFailed,
+			fmt.Sprintf("Failed to reconcile user secrets: %v", err), provisioningDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		return ctrl.Result{}, err
+	}
+	if err := updateStatus(secretsReady, metav1.ConditionTrue, reasonSecretsCreated,
+		fmt.Sprintf("All secrets provisioned for %d databases", len(postgresDB.Spec.Databases)), provisioningDBPhase); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Phase: ConnectionMetadata — ConfigMaps carry connection info consumers need as soon as
+	// databases are ready, so they are created alongside secrets before any role or database work begins.
+	endpoints := resolveClusterEndpoints(cluster, cnpgCluster, postgresDB.Namespace)
+	if err := r.reconcileRoleConfigMaps(ctx, postgresDB, endpoints); err != nil {
+		if statusErr := updateStatus(configMapsReady, metav1.ConditionFalse, reasonConfigMapsCreationFailed,
+			fmt.Sprintf("Failed to reconcile ConfigMaps: %v", err), provisioningDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		return ctrl.Result{}, err
+	}
+	if err := updateStatus(configMapsReady, metav1.ConditionTrue, reasonConfigMapsCreated,
+		fmt.Sprintf("All ConfigMaps provisioned for %d databases", len(postgresDB.Spec.Databases)), provisioningDBPhase); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Phase: RoleProvisioning
+	desiredUsers := getDesiredUsers(postgresDB)
+	actualRolesInSpec := getUsersInClusterSpec(cluster)
+	var missingRolesFromSpec []string
+	for _, role := range desiredUsers {
+		if !slices.Contains(actualRolesInSpec, role) {
+			missingRolesFromSpec = append(missingRolesFromSpec, role)
+		}
+	}
+
+	if len(missingRolesFromSpec) > 0 {
+		logger.Info("User spec changed, patching CNPG Cluster", "missing", missingRolesFromSpec)
+		if err := r.patchManagedRoles(ctx, postgresDB, cluster); err != nil {
+			logger.Error(err, "Failed to patch users in CNPG Cluster")
+			return ctrl.Result{}, err
+		}
+		// Spec updated, requeue to check status
+		if err := updateStatus(rolesReady, metav1.ConditionFalse, reasonWaitingForCNPG, fmt.Sprintf("Waiting for %d roles to be reconciled", len(desiredUsers)), provisioningDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: retryDelay}, nil
+	}
+
+	notReadyRoles, err := r.verifyRolesReady(ctx, desiredUsers, cnpgCluster)
+	if err != nil {
+		if statusErr := updateStatus(rolesReady, metav1.ConditionFalse, reasonUsersCreationFailed, fmt.Sprintf("Role creation failed: %v", err), failedDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		return ctrl.Result{}, err
+	}
+
+	if len(notReadyRoles) > 0 {
+		if err := updateStatus(rolesReady, metav1.ConditionFalse, reasonWaitingForCNPG, fmt.Sprintf("Waiting for roles to be reconciled: %v", notReadyRoles), provisioningDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: retryDelay}, nil
+	}
+
+	// All users present in spec and reconciled in status
+	if err := updateStatus(rolesReady, metav1.ConditionTrue, reasonUsersAvailable, fmt.Sprintf("All %d users in PostgreSQL", len(desiredUsers)), provisioningDBPhase); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Phase: DatabaseProvisioning
+	if err := r.reconcileCNPGDatabases(ctx, postgresDB, cluster); err != nil {
+		logger.Error(err, "Failed to reconcile CNPG Databases")
+		return ctrl.Result{}, err
+	}
+
+	notReadyDatabases, err := r.verifyDatabasesReady(ctx, postgresDB)
+	if err != nil {
+		logger.Error(err, "Failed to verify database status")
+		return ctrl.Result{}, err
+	}
+
+	if len(notReadyDatabases) > 0 {
+		if err := updateStatus(databasesReady, metav1.ConditionFalse, reasonWaitingForCNPG,
+			fmt.Sprintf("Waiting for databases to be ready: %v", notReadyDatabases), provisioningDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: retryDelay}, nil
+	}
+	if err := updateStatus(databasesReady, metav1.ConditionTrue, reasonDatabasesAvailable, fmt.Sprintf("All %d databases ready", len(postgresDB.Spec.Databases)), readyDBPhase); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Phase: RWRolePrivileges
+	// Skipped when no new databases are detected — ALTER DEFAULT PRIVILEGES covers tables
+	// added by migrations on existing databases. Re-runs for all databases when a new one
+	// is added (idempotent for existing ones, required for the new one).
+	if hasNewDatabases(postgresDB) {
+		if cluster.Status.Resources == nil || cluster.Status.Resources.SecretRef == nil {
+			return ctrl.Result{}, fmt.Errorf("PostgresCluster %s has no superuser secret in status yet", cluster.Name)
+		}
+		superSecret := &corev1.Secret{}
+		if err := r.Get(ctx, types.NamespacedName{
+			Name:      cluster.Status.Resources.SecretRef.Name,
+			Namespace: postgresDB.Namespace,
+		}, superSecret); err != nil {
+			return ctrl.Result{}, fmt.Errorf("fetching superuser secret %s: %w", cluster.Status.Resources.SecretRef.Name, err)
+		}
+
+		dbNames := make([]string, 0, len(postgresDB.Spec.Databases))
+		for _, dbSpec := range postgresDB.Spec.Databases {
+			dbNames = append(dbNames, dbSpec.Name)
+		}
+
+		// Named superPassword (not "password") to avoid shadowing the imported
+		// sethvargo/go-password "password" package within this scope.
+		superPassword, ok := superSecret.Data["password"]
+		if !ok {
+			return ctrl.Result{}, fmt.Errorf("superuser secret %s missing 'password' key", cluster.Status.Resources.SecretRef.Name)
+		}
+
+		if err := reconcileRWRolePrivileges(ctx, endpoints.RWHost, string(superPassword), dbNames); err != nil {
+			if statusErr := updateStatus(privilegesReady, metav1.ConditionFalse, reasonPrivilegesGrantFailed,
+				fmt.Sprintf("Failed to grant RW role privileges: %v", err), provisioningDBPhase); statusErr != nil {
+				logger.Error(statusErr, "Failed to update status")
+			}
+			return ctrl.Result{}, err
+		}
+		if err := updateStatus(privilegesReady, metav1.ConditionTrue, reasonPrivilegesGranted,
+			fmt.Sprintf("RW role privileges granted for all %d databases", len(postgresDB.Spec.Databases)), readyDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+	}
+
+	postgresDB.Status.Databases = populateDatabaseStatus(postgresDB)
+	postgresDB.Status.ObservedGeneration = postgresDB.Generation
+	// BUGFIX: persist the final status. The earlier updateStatus calls all ran
+	// before Databases/ObservedGeneration were set, so without this write those
+	// fields only ever change in memory — the "ObservedGeneration == Generation"
+	// short-circuit at the top of Reconcile would never fire and every event
+	// would re-run all phases from scratch.
+	if err := r.Status().Update(ctx, postgresDB); err != nil {
+		logger.Error(err, "Failed to persist final status")
+		return ctrl.Result{}, err
+	}
+
+	logger.Info("All phases complete")
+	return ctrl.Result{}, nil
+}
+
+// ensureClusterReady checks if the referenced PostgresCluster exists and is ready.
+// Returns: cluster (if found), status, error (API errors only — "not found" and
+// "not ready" are reported through the status value, never as an error).
+func (r *PostgresDatabaseReconciler) ensureClusterReady(
+	ctx context.Context,
+	postgresDB *enterprisev4.PostgresDatabase,
+) (*enterprisev4.PostgresCluster, clusterReadyStatus, error) {
+	logger := log.FromContext(ctx)
+
+	cluster := &enterprisev4.PostgresCluster{}
+	if err := r.Get(ctx, types.NamespacedName{Name: postgresDB.Spec.ClusterRef.Name, Namespace: postgresDB.Namespace}, cluster); err != nil {
+		if errors.IsNotFound(err) {
+			return nil, ClusterNotFound, nil
+		}
+		logger.Error(err, "Failed to fetch Cluster", "name", postgresDB.Spec.ClusterRef.Name)
+		return nil, ClusterNotReady, err
+	}
+
+	if cluster.Status.Phase != string(ClusterReady) {
+		logger.Info("Cluster not ready", "status", cluster.Status.Phase)
+		return cluster, ClusterNotReady, nil
+	}
+
+	if cluster.Status.ProvisionerRef == nil {
+		logger.Info("Cluster has no ProvisionerRef yet", "cluster", cluster.Name)
+		return cluster, ClusterNoProvisionerRef, nil
+	}
+
+	return cluster, ClusterReady, nil
+}
+
+// getDesiredUsers builds the list of users we want to create for this PostgresDatabase:
+// one admin role and one rw role per database in the spec.
+func getDesiredUsers(postgresDB *enterprisev4.PostgresDatabase) []string {
+	users := make([]string, 0, len(postgresDB.Spec.Databases)*2)
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		users = append(users, adminRoleName(dbSpec.Name), rwRoleName(dbSpec.Name))
+	}
+	return users
+}
+
+// getUsersInClusterSpec checks our PostgresCluster CR rather than the CNPG Cluster
+// because the database controller owns PostgresCluster.spec.managedRoles via SSA —
+// CNPG may have roles from other sources that we must not treat as our own.
+// Name-only comparison is sufficient: PasswordSecretRef is always set in the same
+// reconcile that creates the role, so a role present by name already carries the correct ref.
+func getUsersInClusterSpec(cluster *enterprisev4.PostgresCluster) []string {
+	users := make([]string, 0, len(cluster.Spec.ManagedRoles))
+	for _, role := range cluster.Spec.ManagedRoles {
+		users = append(users, role.Name)
+	}
+	return users
+}
+
+// getRoleConflicts checks ManagedFields on the PostgresCluster to detect if any roles
+// this PostgresDatabase wants to own are already claimed by a different SSA field manager.
+func getRoleConflicts(postgresDB *enterprisev4.PostgresDatabase, cluster *enterprisev4.PostgresCluster) []string {
+	myManager := fieldManagerName(postgresDB.Name)
+
+	desired := make(map[string]struct{}, len(postgresDB.Spec.Databases)*2)
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		desired[adminRoleName(dbSpec.Name)] = struct{}{}
+		desired[rwRoleName(dbSpec.Name)] = struct{}{}
+	}
+
+	roleOwners := managedRoleOwners(cluster.ManagedFields)
+
+	var conflicts []string
+	for roleName := range desired {
+		owner, exists := roleOwners[roleName]
+		if exists && owner != myManager {
+			conflicts = append(conflicts, fmt.Sprintf("%s (owned by %s)", roleName, owner))
+		}
+	}
+	return conflicts
+}
+
+// managedRoleOwners builds a map of role name → field manager from ManagedFields.
+// Later entries overwrite earlier ones when two managers list the same role name.
+func managedRoleOwners(managedFields []metav1.ManagedFieldsEntry) map[string]string {
+	owners := make(map[string]string)
+	for _, mf := range managedFields {
+		if mf.FieldsV1 == nil {
+			continue
+		}
+		for _, name := range parseRoleNames(mf.FieldsV1.Raw) {
+			owners[name] = mf.Manager
+		}
+	}
+	return owners
+}
+
+// parseRoleNames extracts role names from FieldsV1 JSON by walking
+// f:spec → f:managedRoles → k:{"name":""}.
+// Returns nil on malformed input; a failed type assertion on either level
+// yields a nil map, which is safe to index (read) in Go.
+func parseRoleNames(raw []byte) []string {
+	var fields map[string]any
+	if err := json.Unmarshal(raw, &fields); err != nil {
+		return nil
+	}
+
+	spec, _ := fields["f:spec"].(map[string]any)
+	roles, _ := spec["f:managedRoles"].(map[string]any)
+
+	var names []string
+	for key := range roles {
+		var k struct{ Name string }
+		if err := json.Unmarshal([]byte(strings.TrimPrefix(key, "k:")), &k); err == nil && k.Name != "" {
+			names = append(names, k.Name)
+		}
+	}
+	return names
+}
+
+// patchManagedRoles patches PostgresCluster.spec.managedRoles via SSA using an unstructured patch.
+// Using unstructured avoids the zero-value problem: typed Go structs serialize required fields
+// (e.g. spec.class) as "" even when unset, causing SSA to claim ownership and conflict.
+// An unstructured map contains ONLY the keys we explicitly set — nothing else leaks.
+// PostgresCluster controller will then diff and reconcile these roles to CNPG Cluster.
+func (r *PostgresDatabaseReconciler) patchManagedRoles(
+	ctx context.Context,
+	postgresDB *enterprisev4.PostgresDatabase,
+	cluster *enterprisev4.PostgresCluster,
+) error {
+	logger := log.FromContext(ctx)
+
+	// Build roles — name, ensure, and PasswordSecretRef pointing to the pre-created secrets.
+	// Secrets are guaranteed to exist at this point because Phase 2a (reconcileUserSecrets)
+	// runs before patchManagedRoles in the reconciliation loop.
+	allRoles := make([]enterprisev4.ManagedRole, 0, len(postgresDB.Spec.Databases)*2)
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		allRoles = append(allRoles,
+			enterprisev4.ManagedRole{
+				Name:              adminRoleName(dbSpec.Name),
+				Ensure:            "present",
+				PasswordSecretRef: &corev1.LocalObjectReference{Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleAdmin)},
+			},
+			enterprisev4.ManagedRole{
+				Name:              rwRoleName(dbSpec.Name),
+				Ensure:            "present",
+				PasswordSecretRef: &corev1.LocalObjectReference{Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleRW)},
+			})
+	}
+
+	// Construct a minimal unstructured patch — only spec.managedRoles is present.
+	// No other spec fields (class, storage, instances...) are included, so SSA
+	// will only claim ownership of the roles we explicitly list.
+	// NOTE(review): Object holds typed ManagedRole values rather than plain
+	// map[string]any — this marshals fine for an Apply patch, but
+	// unstructured.DeepCopy panics on non-JSON-primitive values; confirm nothing
+	// on the client path deep-copies this object.
+	rolePatch := &unstructured.Unstructured{
+		Object: map[string]any{
+			"apiVersion": cluster.APIVersion,
+			"kind":       cluster.Kind,
+			"metadata": map[string]any{
+				"name":      cluster.Name,
+				"namespace": cluster.Namespace,
+			},
+			"spec": map[string]any{
+				"managedRoles": allRoles,
+			},
+		},
+	}
+
+	fieldManager := fieldManagerName(postgresDB.Name)
+	if err := r.Patch(ctx, rolePatch, client.Apply, client.FieldOwner(fieldManager)); err != nil {
+		logger.Error(err, "Failed to add users to PostgresCluster", "postgresDatabase", postgresDB.Name)
+		return fmt.Errorf("failed to patch managed roles for PostgresDatabase %s: %w", postgresDB.Name, err)
+	}
+	logger.Info("Users added to PostgresCluster via SSA", "postgresDatabase", postgresDB.Name, "postgresCluster", cluster.Name, "roleCount", len(allRoles))
+
+	return nil
+}
+
+// verifyRolesReady checks if CNPG has finished creating the users.
+func (r *PostgresDatabaseReconciler) verifyRolesReady(
+	ctx context.Context,
+	expectedUsers []string,
+	cnpgCluster *cnpgv1.Cluster,
+) ([]string, error) {
+	logger := log.FromContext(ctx)
+
+	// A role listed in CannotReconcile is a permanent failure for that user —
+	// surface it as an error rather than waiting forever.
+	if cnpgCluster.Status.ManagedRolesStatus.CannotReconcile != nil {
+		for _, userName := range expectedUsers {
+			if errs, exists := cnpgCluster.Status.ManagedRolesStatus.CannotReconcile[userName]; exists {
+				logger.Error(nil, "User reconciliation failed permanently", "user", userName, "errors", errs)
+				return nil, fmt.Errorf("user %s reconciliation failed: %v", userName, errs)
+			}
+		}
+	}
+
+	reconciledUsers := cnpgCluster.Status.ManagedRolesStatus.ByStatus[cnpgv1.RoleStatusReconciled]
+	var notReady []string
+	for _, userName := range expectedUsers {
+		if !slices.Contains(reconciledUsers, userName) {
+			notReady = append(notReady, userName)
+		}
+	}
+
+	if len(notReady) > 0 {
+		logger.Info("Users not reconciled yet", "pending", notReady)
+	} else {
+		logger.Info("All users reconciled")
+	}
+	return notReady, nil
+}
+
+// reconcileCNPGDatabases creates or updates CNPG Database CRs for each database in the spec.
+func (r *PostgresDatabaseReconciler) reconcileCNPGDatabases(
+	ctx context.Context,
+	postgresDB *enterprisev4.PostgresDatabase,
+	cluster *enterprisev4.PostgresCluster,
+) error {
+	logger := log.FromContext(ctx)
+
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		logger.Info("Processing database", "database", dbSpec.Name)
+
+		cnpgDBName := cnpgDatabaseName(postgresDB.Name, dbSpec.Name)
+
+		// reclaimPolicy controls whether CNPG physically drops the PostgreSQL database
+		// when the CR is deleted — a destructive and irreversible operation.
+		reclaimPolicy := cnpgv1.DatabaseReclaimDelete
+		if dbSpec.DeletionPolicy == deletionPolicyRetain {
+			reclaimPolicy = cnpgv1.DatabaseReclaimRetain
+		}
+
+		cnpgDB := &cnpgv1.Database{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      cnpgDBName,
+				Namespace: postgresDB.Namespace,
+			},
+		}
+		// CreateOrUpdate fetches the object, applies the mutate func, and only
+		// issues a write when the mutated object differs from what is stored.
+		_, err := controllerutil.CreateOrUpdate(ctx, r.Client, cnpgDB, func() error {
+
+			// The Spec is rebuilt wholesale each reconcile so drift is always corrected.
+			spec := cnpgv1.DatabaseSpec{
+				Name:  dbSpec.Name,
+				Owner: adminRoleName(dbSpec.Name),
+				ClusterRef: corev1.LocalObjectReference{
+					Name: cluster.Status.ProvisionerRef.Name,
+				},
+				ReclaimPolicy: reclaimPolicy,
+			}
+			cnpgDB.Spec = spec
+
+			reAdopting := cnpgDB.Annotations[annotationRetainedFrom] == postgresDB.Name
+			if reAdopting {
+				logger.Info("Re-adopting orphaned CNPG Database", "name", cnpgDBName)
+				delete(cnpgDB.Annotations, annotationRetainedFrom)
+			}
+			// Set ownerRef on creation or re-adoption
+			if cnpgDB.CreationTimestamp.IsZero() || reAdopting {
+				if err := controllerutil.SetControllerReference(postgresDB, cnpgDB, r.Scheme); err != nil {
+					logger.Error(err, "Failed to set owner reference")
+					return err
+				}
+			}
+			return nil
+		})
+		if err != nil {
+			logger.Error(err, "Failed to create CNPG Database", "name", cnpgDBName)
+			return fmt.Errorf("failed to create CNPG Database %s: %w", cnpgDBName, err)
+		}
+		logger.Info("CNPG Database created/updated successfully", "database", dbSpec.Name)
+	}
+	return nil
+}
+
+// verifyDatabasesReady checks if CNPG has finished provisioning the databases.
+// All databases are checked before returning so the caller gets a complete picture,
+// consistent with verifyRolesReady.
+func (r *PostgresDatabaseReconciler) verifyDatabasesReady(
+	ctx context.Context,
+	postgresDB *enterprisev4.PostgresDatabase,
+) ([]string, error) {
+	logger := log.FromContext(ctx)
+
+	var notReady []string
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		cnpgDBName := cnpgDatabaseName(postgresDB.Name, dbSpec.Name)
+
+		cnpgDB := &cnpgv1.Database{}
+		if err := r.Get(ctx, types.NamespacedName{
+			Name:      cnpgDBName,
+			Namespace: postgresDB.Namespace,
+		}, cnpgDB); err != nil {
+			logger.Error(err, "Failed to get CNPG Database status", "database", dbSpec.Name)
+			return nil, fmt.Errorf("failed to get CNPG Database %s: %w", cnpgDBName, err)
+		}
+
+		// Status.Applied is a *bool: nil means CNPG has not reported yet; treat
+		// both nil and false as not ready.
+		if cnpgDB.Status.Applied == nil || !*cnpgDB.Status.Applied {
+			notReady = append(notReady, dbSpec.Name)
+		}
+	}
+	return notReady, nil
+}
+
+// updateStatus sets a condition and phase on the PostgresDatabase status in a single write —
+// callers should not call r.Status().Update() directly.
+func (r *PostgresDatabaseReconciler) updateStatus(
+	ctx context.Context,
+	db *enterprisev4.PostgresDatabase,
+	conditionType conditionTypes,
+	conditionStatus metav1.ConditionStatus,
+	reason conditionReasons,
+	message string,
+	phase reconcileDBPhases,
+) error {
+	meta.SetStatusCondition(&db.Status.Conditions, metav1.Condition{
+		Type:               string(conditionType),
+		Status:             conditionStatus,
+		Reason:             string(reason),
+		Message:            message,
+		ObservedGeneration: db.Generation,
+	})
+	db.Status.Phase = string(phase)
+	return r.Status().Update(ctx, db)
+}
+
+// deletionPlan separates databases by their DeletionPolicy for the cleanup workflow.
+type deletionPlan struct {
+	retained []enterprisev4.DatabaseDefinition
+	deleted  []enterprisev4.DatabaseDefinition
+}
+
+// buildDeletionPlan splits databases into retained and deleted groups.
+func buildDeletionPlan(databases []enterprisev4.DatabaseDefinition) deletionPlan {
+	var plan deletionPlan
+	for _, db := range databases {
+		if db.DeletionPolicy == deletionPolicyRetain {
+			plan.retained = append(plan.retained, db)
+		} else {
+			plan.deleted = append(plan.deleted, db)
+		}
+	}
+	return plan
+}
+
+// handleDeletion orchestrates the cleanup workflow for a PostgresDatabase being deleted.
+func (r *PostgresDatabaseReconciler) handleDeletion(ctx context.Context, postgresDB *enterprisev4.PostgresDatabase) error {
+	plan := buildDeletionPlan(postgresDB.Spec.Databases)
+
+	// Orphan retained resources before any deletion runs, so nothing with a
+	// Retain policy can ever be swept up by the delete pass below.
+	if err := r.orphanRetainedResources(ctx, postgresDB, plan.retained); err != nil {
+		return err
+	}
+	if err := r.deleteRemovedResources(ctx, postgresDB, plan.deleted); err != nil {
+		return err
+	}
+	if err := r.cleanupManagedRoles(ctx, postgresDB, plan); err != nil {
+		return err
+	}
+
+	controllerutil.RemoveFinalizer(postgresDB, postgresDatabaseFinalizerName)
+	if err := r.Update(ctx, postgresDB); err != nil {
+		// Object already gone — nothing left to release; treat as success.
+		if errors.IsNotFound(err) {
+			return nil
+		}
+		return fmt.Errorf("failed to remove finalizer: %w", err)
+	}
+
+	log.FromContext(ctx).Info("Cleanup complete for PostgresDatabase",
+		"name", postgresDB.Name,
+		"retained", len(plan.retained),
+		"deleted", len(plan.deleted))
+	return nil
+}
+
+// orphanRetainedResources strips ownerRefs and adds retention annotations
+// so resources survive the parent's deletion and can be re-adopted later.
+func (r *PostgresDatabaseReconciler) orphanRetainedResources(
+	ctx context.Context,
+	postgresDB *enterprisev4.PostgresDatabase,
+	retained []enterprisev4.DatabaseDefinition,
+) error {
+	if err := r.orphanCNPGDatabases(ctx, postgresDB, retained); err != nil {
+		return err
+	}
+	if err := r.orphanConfigMaps(ctx, postgresDB, retained); err != nil {
+		return err
+	}
+	if err := r.orphanSecrets(ctx, postgresDB, retained); err != nil {
+		return err
+	}
+	return nil
+}
+
+// deleteRemovedResources deletes CNPG Databases, ConfigMaps, and Secrets
+// for databases with the Delete policy.
+func (r *PostgresDatabaseReconciler) deleteRemovedResources(
+	ctx context.Context,
+	postgresDB *enterprisev4.PostgresDatabase,
+	deleted []enterprisev4.DatabaseDefinition,
+) error {
+	if err := r.deleteCNPGDatabases(ctx, postgresDB, deleted); err != nil {
+		return err
+	}
+	if err := r.deleteConfigMaps(ctx, postgresDB, deleted); err != nil {
+		return err
+	}
+	if err := r.deleteSecrets(ctx, postgresDB, deleted); err != nil {
+		return err
+	}
+	return nil
+}
+
+// cleanupManagedRoles releases SSA ownership of deleted databases' roles.
+// When all databases are retained, roles stay as-is under our field manager.
+func (r *PostgresDatabaseReconciler) cleanupManagedRoles(
+	ctx context.Context,
+	postgresDB *enterprisev4.PostgresDatabase,
+	plan deletionPlan,
+) error {
+	if len(plan.deleted) == 0 {
+		return nil
+	}
+	cluster := &enterprisev4.PostgresCluster{}
+	if err := r.Get(ctx, types.NamespacedName{Name: postgresDB.Spec.ClusterRef.Name, Namespace: postgresDB.Namespace}, cluster); err != nil {
+		if !errors.IsNotFound(err) {
+			return fmt.Errorf("failed to get PostgresCluster for role cleanup: %w", err)
+		}
+		log.FromContext(ctx).Info("PostgresCluster already deleted, skipping role cleanup")
+		return nil
+	}
+	return r.patchManagedRolesOnDeletion(ctx, postgresDB, cluster, plan.retained)
+}
+
+// orphanCNPGDatabases strips ownerReferences and adds a retention annotation
+// on CNPG Database CRs for the given databases. Already-orphaned and missing
+// objects are skipped, making the pass idempotent across retried deletions.
+func (r *PostgresDatabaseReconciler) orphanCNPGDatabases(
+	ctx context.Context,
+	postgresDB *enterprisev4.PostgresDatabase,
+	databases []enterprisev4.DatabaseDefinition,
+) error {
+	logger := log.FromContext(ctx)
+
+	for _, dbSpec := range databases {
+		cnpgDBName := cnpgDatabaseName(postgresDB.Name, dbSpec.Name)
+		db := &cnpgv1.Database{}
+		if err := r.Get(ctx, types.NamespacedName{Name: cnpgDBName, Namespace: postgresDB.Namespace}, db); err != nil {
+			if errors.IsNotFound(err) {
+				continue
+			}
+			return fmt.Errorf("failed to get CNPG Database %s for orphaning: %w", cnpgDBName, err)
+		}
+		if db.Annotations[annotationRetainedFrom] == postgresDB.Name {
+			continue
+		}
+		stripOwnerReference(db, postgresDB.UID)
+		if db.Annotations == nil {
+			db.Annotations = make(map[string]string)
+		}
+		db.Annotations[annotationRetainedFrom] = postgresDB.Name
+		if err := r.Update(ctx, db); err != nil {
+			return fmt.Errorf("failed to orphan CNPG Database %s: %w", cnpgDBName, err)
+		}
+		logger.Info("Orphaned CNPG Database CR", "name", cnpgDBName)
+	}
+	return nil
+}
+
+// orphanConfigMaps strips ownerReferences and adds a retention annotation
+// on ConfigMaps for the given databases. Same idempotent skip rules as
+// orphanCNPGDatabases.
+func (r *PostgresDatabaseReconciler) orphanConfigMaps(
+	ctx context.Context,
+	postgresDB *enterprisev4.PostgresDatabase,
+	databases []enterprisev4.DatabaseDefinition,
+) error {
+	logger := log.FromContext(ctx)
+
+	for _, dbSpec := range databases {
+		cmName := configMapName(postgresDB.Name, dbSpec.Name)
+		cm := &corev1.ConfigMap{}
+		if err := r.Get(ctx, types.NamespacedName{Name: cmName, Namespace: postgresDB.Namespace}, cm); err != nil {
+			if errors.IsNotFound(err) {
+				continue
+			}
+			return fmt.Errorf("failed to get ConfigMap %s for orphaning: %w", cmName, err)
+		}
+		if cm.Annotations[annotationRetainedFrom] == postgresDB.Name {
+			continue
+		}
+		stripOwnerReference(cm, postgresDB.UID)
+		if cm.Annotations == nil {
+			cm.Annotations = make(map[string]string)
+		}
+		cm.Annotations[annotationRetainedFrom] = postgresDB.Name
+		if err := r.Update(ctx, cm); err != nil {
+			return fmt.Errorf("failed to orphan ConfigMap %s: %w", cmName, err)
+		}
+		logger.Info("Orphaned ConfigMap", "name", cmName)
+	}
+	return nil
+}
+
+// orphanSecrets strips ownerReferences and adds a retention annotation
+// on Secrets for the given databases.
+func (r *PostgresDatabaseReconciler) orphanSecrets( + ctx context.Context, + postgresDB *enterprisev4.PostgresDatabase, + databases []enterprisev4.DatabaseDefinition, +) error { + logger := log.FromContext(ctx) + + for _, dbSpec := range databases { + for _, role := range []string{secretRoleAdmin, secretRoleRW} { + secretName := roleSecretName(postgresDB.Name, dbSpec.Name, role) + secret := &corev1.Secret{} + if err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: postgresDB.Namespace}, secret); err != nil { + if errors.IsNotFound(err) { + continue + } + return fmt.Errorf("failed to get Secret %s for orphaning: %w", secretName, err) + } + if secret.Annotations[annotationRetainedFrom] == postgresDB.Name { + continue + } + stripOwnerReference(secret, postgresDB.UID) + if secret.Annotations == nil { + secret.Annotations = make(map[string]string) + } + secret.Annotations[annotationRetainedFrom] = postgresDB.Name + if err := r.Update(ctx, secret); err != nil { + return fmt.Errorf("failed to orphan Secret %s: %w", secretName, err) + } + logger.Info("Orphaned Secret", "name", secretName) + } + } + return nil +} + +// deleteCNPGDatabases explicitly deletes CNPG Database CRs for the given databases. 
+func (r *PostgresDatabaseReconciler) deleteCNPGDatabases( + ctx context.Context, + postgresDB *enterprisev4.PostgresDatabase, + databases []enterprisev4.DatabaseDefinition, +) error { + logger := log.FromContext(ctx) + + for _, dbSpec := range databases { + cnpgDBName := cnpgDatabaseName(postgresDB.Name, dbSpec.Name) + db := &cnpgv1.Database{ + ObjectMeta: metav1.ObjectMeta{ + Name: cnpgDBName, + Namespace: postgresDB.Namespace, + }, + } + if err := r.Delete(ctx, db); err != nil { + if errors.IsNotFound(err) { + logger.Info("CNPG Database already deleted", "name", cnpgDBName) + continue + } + return fmt.Errorf("failed to delete CNPG Database %s: %w", cnpgDBName, err) + } + logger.Info("Deleted CNPG Database CR", "name", cnpgDBName) + } + return nil +} + +// deleteConfigMaps explicitly deletes ConfigMaps for the given databases. +func (r *PostgresDatabaseReconciler) deleteConfigMaps( + ctx context.Context, + postgresDB *enterprisev4.PostgresDatabase, + databases []enterprisev4.DatabaseDefinition, +) error { + logger := log.FromContext(ctx) + + for _, dbSpec := range databases { + cmName := configMapName(postgresDB.Name, dbSpec.Name) + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Namespace: postgresDB.Namespace, + }, + } + if err := r.Delete(ctx, cm); err != nil { + if errors.IsNotFound(err) { + logger.Info("ConfigMap already deleted", "name", cmName) + continue + } + return fmt.Errorf("failed to delete ConfigMap %s: %w", cmName, err) + } + logger.Info("Deleted ConfigMap", "name", cmName) + } + return nil +} + +// deleteSecrets explicitly deletes Secrets for the given databases. 
+func (r *PostgresDatabaseReconciler) deleteSecrets( + ctx context.Context, + postgresDB *enterprisev4.PostgresDatabase, + databases []enterprisev4.DatabaseDefinition, +) error { + logger := log.FromContext(ctx) + + for _, dbSpec := range databases { + for _, role := range []string{secretRoleAdmin, secretRoleRW} { + secretName := roleSecretName(postgresDB.Name, dbSpec.Name, role) + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: postgresDB.Namespace, + }, + } + if err := r.Delete(ctx, secret); err != nil { + if errors.IsNotFound(err) { + logger.Info("Secret already deleted", "name", secretName) + continue + } + return fmt.Errorf("failed to delete Secret %s: %w", secretName, err) + } + logger.Info("Deleted Secret", "name", secretName) + } + } + return nil +} + +// buildRetainedRoles returns the SSA role list for databases that are being retained. +// Returns an empty slice when no databases are retained, which clears our field manager's claim. +func buildRetainedRoles(postgresDBName string, retainedDBs []enterprisev4.DatabaseDefinition) []enterprisev4.ManagedRole { + roles := make([]enterprisev4.ManagedRole, 0, len(retainedDBs)*2) + for _, dbSpec := range retainedDBs { + roles = append(roles, + enterprisev4.ManagedRole{ + Name: adminRoleName(dbSpec.Name), + Ensure: "present", + PasswordSecretRef: &corev1.LocalObjectReference{Name: roleSecretName(postgresDBName, dbSpec.Name, secretRoleAdmin)}, + }, + enterprisev4.ManagedRole{ + Name: rwRoleName(dbSpec.Name), + Ensure: "present", + PasswordSecretRef: &corev1.LocalObjectReference{Name: roleSecretName(postgresDBName, dbSpec.Name, secretRoleRW)}, + }, + ) + } + return roles +} + +// patchManagedRolesOnDeletion applies an SSA patch to keep only retained databases' roles. +// +// SSA ensures that each patch only affects fields owned by our field manager +// (postgresdatabase-). 
This means when one PostgresDatabase is deleted, +// its role cleanup cannot interfere with roles managed by other PostgresDatabase +// controllers targeting the same PostgresCluster. +func (r *PostgresDatabaseReconciler) patchManagedRolesOnDeletion( + ctx context.Context, + postgresDB *enterprisev4.PostgresDatabase, + cluster *enterprisev4.PostgresCluster, + retainedDBs []enterprisev4.DatabaseDefinition, +) error { + roles := buildRetainedRoles(postgresDB.Name, retainedDBs) + + rolePatch := &unstructured.Unstructured{ + Object: map[string]any{ + "apiVersion": cluster.APIVersion, + "kind": cluster.Kind, + "metadata": map[string]any{ + "name": cluster.Name, + "namespace": cluster.Namespace, + }, + "spec": map[string]any{ + "managedRoles": roles, + }, + }, + } + + fieldManager := fieldManagerName(postgresDB.Name) + if err := r.Patch(ctx, rolePatch, client.Apply, client.FieldOwner(fieldManager)); err != nil { + return fmt.Errorf("failed to patch managed roles on deletion: %w", err) + } + + log.FromContext(ctx).Info("Patched managed roles on deletion", + "postgresDatabase", postgresDB.Name, + "retainedRoles", len(roles)) + return nil +} + +// stripOwnerReference removes only the ownerReference matching the given UID from obj, +// preserving any other owner references the object may have. +func stripOwnerReference(obj metav1.Object, ownerUID types.UID) { + refs := obj.GetOwnerReferences() + filtered := make([]metav1.OwnerReference, 0, len(refs)) + for _, ref := range refs { + if ref.UID != ownerUID { + filtered = append(filtered, ref) + } + } + obj.SetOwnerReferences(filtered) +} + +// adoptResource removes the retention annotation, restores the controller ownerRef, +// and updates the object. Works for any resource type (Secret, ConfigMap, CNPG Database). 
func (r *PostgresDatabaseReconciler) adoptResource(
	ctx context.Context,
	postgresDB *enterprisev4.PostgresDatabase,
	obj client.Object,
) error {
	// delete() on a nil map is a safe no-op, so no nil check is needed here.
	annotations := obj.GetAnnotations()
	delete(annotations, annotationRetainedFrom)
	obj.SetAnnotations(annotations)
	// Restore this PostgresDatabase as the controller owner so GC and watch
	// events work again for the re-adopted object.
	if err := controllerutil.SetControllerReference(postgresDB, obj, r.Scheme); err != nil {
		return err
	}
	return r.Update(ctx, obj)
}

// SetupWithManager sets up the controller with the Manager.
func (r *PostgresDatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error {

	// Index CNPG Databases by controller owner so getDatabasesInCNPGSpec can filter by owner name without a full list scan.
	if err := mgr.GetFieldIndexer().IndexField(
		context.Background(),
		&cnpgv1.Database{},
		".metadata.controller",
		func(obj client.Object) []string {
			owner := metav1.GetControllerOf(obj)
			if owner == nil {
				return nil
			}
			// Only index objects controlled by our own PostgresDatabase kind;
			// CNPG Databases owned by anything else are excluded from the index.
			if owner.APIVersion != enterprisev4.GroupVersion.String() || owner.Kind != "PostgresDatabase" {
				return nil
			}
			return []string{owner.Name}
		},
	); err != nil {
		return err
	}
	// Reconcile on spec changes (generation bump) OR finalizer-list changes.
	// The finalizer predicate is needed because adding/removing a finalizer
	// does not change metadata.generation, yet deletion handling depends on it.
	return ctrl.NewControllerManagedBy(mgr).
		For(&enterprisev4.PostgresDatabase{}, builder.WithPredicates(
			predicate.Or(
				predicate.GenerationChangedPredicate{},
				predicate.Funcs{
					UpdateFunc: func(e event.UpdateEvent) bool {
						return !reflect.DeepEqual(
							e.ObjectOld.GetFinalizers(),
							e.ObjectNew.GetFinalizers(),
						)
					},
				},
			),
		)).
		Owns(&cnpgv1.Database{}).
		Owns(&corev1.Secret{}).
		Owns(&corev1.ConfigMap{}).
		Named("postgresdatabase").
		Complete(r)
}

// DBRepo abstracts SQL execution so grant logic is testable without a live cluster.
// Connection lifecycle is managed internally — callers only call ExecGrants.
type DBRepo interface {
	ExecGrants(ctx context.Context, dbName string) error
}

// dbRepo is the production DBRepo implementation backed by a single pgx connection.
type dbRepo struct {
	conn *pgx.Conn
}

// newDBRepo opens a direct superuser connection, bypassing any pooler.
+// PgBouncer in transaction mode blocks DDL; password set on config avoids URL-encoding issues. +func newDBRepo(ctx context.Context, host, dbName, password string) (DBRepo, error) { + cfg, err := pgx.ParseConfig(fmt.Sprintf( + "postgres://%s@%s:%s/%s?sslmode=require&connect_timeout=%d", + superUsername, host, postgresPort, dbName, + int(dbConnectTimeout.Seconds()), + )) + if err != nil { + return nil, fmt.Errorf("parsing connection config for %s/%s: %w", host, dbName, err) + } + cfg.Password = password + + conn, err := pgx.ConnectConfig(ctx, cfg) + if err != nil { + return nil, fmt.Errorf("connecting to %s/%s: %w", host, dbName, err) + } + return &dbRepo{conn: conn}, nil +} + +// ExecGrants applies all privilege grants needed for the RW role on a single database. +// GRANT ON ALL TABLES/SEQUENCES covers existing objects; ALTER DEFAULT PRIVILEGES covers +// future ones created by the admin role (e.g. via migrations). +func (r *dbRepo) ExecGrants(ctx context.Context, dbName string) error { + defer r.conn.Close(context.Background()) + + adminRole := adminRoleName(dbName) + rwRole := rwRoleName(dbName) + + tx, err := r.conn.Begin(ctx) + if err != nil { + return fmt.Errorf("beginning transaction: %w", err) + } + + // Identifiers cannot be parameterised in PostgreSQL — fmt.Sprintf is correct here. + // These names are generated internally by our own functions, never from user input. 
+ stmts := []string{ + fmt.Sprintf("GRANT CONNECT ON DATABASE %s TO %s", dbName, rwRole), + fmt.Sprintf("GRANT USAGE ON SCHEMA public TO %s", rwRole), + fmt.Sprintf("GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO %s", rwRole), + fmt.Sprintf("GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO %s", rwRole), + fmt.Sprintf("ALTER DEFAULT PRIVILEGES FOR ROLE %s IN SCHEMA public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO %s", adminRole, rwRole), + fmt.Sprintf("ALTER DEFAULT PRIVILEGES FOR ROLE %s IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO %s", adminRole, rwRole), + } + + for _, stmt := range stmts { + if _, err := tx.Exec(ctx, stmt); err != nil { + return fmt.Errorf("executing grant %q: %w", stmt, err) + } + } + + return tx.Commit(ctx) +} + +// hasNewDatabases returns true when spec contains a database not yet present in status. +// Used to skip the grants phase when a spec change is unrelated to the database set — +// grants only need to run when a new database is introduced, not on every spec update. +func hasNewDatabases(postgresDB *enterprisev4.PostgresDatabase) bool { + existing := make(map[string]bool, len(postgresDB.Status.Databases)) + for _, dbInfo := range postgresDB.Status.Databases { + existing[dbInfo.Name] = true + } + for _, dbSpec := range postgresDB.Spec.Databases { + if !existing[dbSpec.Name] { + return true + } + } + return false +} + +// reconcileRWRolePrivileges ensures the RW role has database-level access. +// CNPG owns role existence, not privileges — the RW role can authenticate but gets +// "permission denied" on every query until these grants are applied. 
+func reconcileRWRolePrivileges( + ctx context.Context, + rwHost string, + superPassword string, + dbNames []string, +) error { + logger := log.FromContext(ctx) + + var errs []error + for _, dbName := range dbNames { + db, err := newDBRepo(ctx, rwHost, dbName, superPassword) + if err != nil { + logger.Error(err, "Failed to connect to database", "database", dbName) + errs = append(errs, fmt.Errorf("database %s: %w", dbName, err)) + continue + } + if err := db.ExecGrants(ctx, dbName); err != nil { + logger.Error(err, "Failed to grant RW role privileges", "database", dbName) + errs = append(errs, fmt.Errorf("database %s: %w", dbName, err)) + continue + } + logger.Info("RW role privileges granted", "database", dbName, "rwRole", rwRoleName(dbName)) + } + + return stderrors.Join(errs...) +} + +// dbConnectTimeout caps how long we wait for the primary to accept a connection. +// A hung primary must not stall the reconcile goroutine indefinitely. +const dbConnectTimeout = 10 * time.Second + +// roleSecretName gives both secret creation and status wiring a single source of truth +// for naming — eliminating any risk of the two sides drifting out of sync. +func roleSecretName(postgresDBName, dbName, role string) string { + return fmt.Sprintf("%s-%s-%s", postgresDBName, dbName, role) +} + +func adminRoleName(dbName string) string { return dbName + "_admin" } + +func rwRoleName(dbName string) string { return dbName + "_rw" } + +func cnpgDatabaseName(postgresDBName, dbName string) string { + return fmt.Sprintf("%s-%s", postgresDBName, dbName) +} + +// generatePassword uses crypto/rand (via sethvargo/go-password) rather than math/rand +// because these credentials protect live database access — predictability is unacceptable. 
+func generatePassword() (string, error) { + return password.Generate(passwordLength, passwordDigits, passwordSymbols, false, true) +} + +// reconcileUserSecrets ensures admin and rw secrets exist for each database, +// delegating per-secret logic to ensureSecret. +func (r *PostgresDatabaseReconciler) reconcileUserSecrets( + ctx context.Context, + postgresDB *enterprisev4.PostgresDatabase, +) error { + for _, dbSpec := range postgresDB.Spec.Databases { + if err := r.ensureSecret(ctx, postgresDB, adminRoleName(dbSpec.Name), + roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleAdmin)); err != nil { + return err + } + if err := r.ensureSecret(ctx, postgresDB, rwRoleName(dbSpec.Name), + roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleRW)); err != nil { + return err + } + } + return nil +} + +// ensureSecret handles three states: missing (create), orphaned (re-adopt), or existing (no-op). +// Intentionally not using CreateOrUpdate because secrets must never be updated after creation. +// Rotating a password here would break live connections before the application picks up the change. +func (r *PostgresDatabaseReconciler) ensureSecret( + ctx context.Context, + postgresDB *enterprisev4.PostgresDatabase, + roleName, secretName string, +) error { + secret, err := r.getSecret(ctx, postgresDB.Namespace, secretName) + if err != nil { + return err + } + logger := log.FromContext(ctx) + + switch { + case secret == nil: + logger.Info("Creating missing user secret", "name", secretName) + return r.createUserSecret(ctx, postgresDB, roleName, secretName) + case secret.Annotations[annotationRetainedFrom] == postgresDB.Name: + logger.Info("Re-adopting orphaned secret", "name", secretName) + return r.adoptResource(ctx, postgresDB, secret) + } + return nil +} + +// getSecret fetches a Secret by name, returning nil if not found. +// Non-NotFound errors are treated as real failures — a transient API error +// must not cause a spurious Create attempt. 
+func (r *PostgresDatabaseReconciler) getSecret(ctx context.Context, namespace, name string) (*corev1.Secret, error) { + logger := log.FromContext(ctx) + + secret := &corev1.Secret{} + err := r.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, secret) + if errors.IsNotFound(err) { + return nil, nil + } + if err != nil { + logger.Error(err, "Failed to check secret existence", "secret", name) + return nil, err + } + return secret, nil +} + +// createUserSecret generates a password, builds the Secret, and creates it. +// AlreadyExists is treated as success — safe to retry after a partial failure. +func (r *PostgresDatabaseReconciler) createUserSecret( + ctx context.Context, + postgresDB *enterprisev4.PostgresDatabase, + roleName string, + secretName string, +) error { + logger := log.FromContext(ctx) + + password, err := generatePassword() + if err != nil { + logger.Error(err, "Failed to generate password", "secret", secretName) + return err + } + + secret := buildPasswordSecret(postgresDB, secretName, roleName, password) + if err := controllerutil.SetControllerReference(postgresDB, secret, r.Scheme); err != nil { + return fmt.Errorf("failed to set owner reference on Secret %s: %w", secretName, err) + } + if err := r.Create(ctx, secret); err != nil { + if errors.IsAlreadyExists(err) { + return nil + } + logger.Error(err, "Failed to create secret", "secret", secretName) + return err + } + return nil +} + +// buildPasswordSecret constructs the Secret object with "username" and "password" keys required by CNPG. +// OwnerRef is set by the caller. 
+func buildPasswordSecret(postgresDB *enterprisev4.PostgresDatabase, secretName, roleName, password string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: postgresDB.Namespace, + Labels: map[string]string{ + labelManagedBy: "splunk-operator", + labelCNPGReload: "true", + }, + }, + Data: map[string][]byte{ + "username": []byte(roleName), + "password": []byte(password), + }, + } +} + +// configMapName mirrors roleSecretName() so creation and status wiring share one source of truth. +func configMapName(postgresDBName, dbName string) string { + return fmt.Sprintf("%s-%s-config", postgresDBName, dbName) +} + +// clusterEndpoints holds fully-resolved connection hostnames for a cluster. +// PoolerRWHost and PoolerROHost are empty when connection pooling is disabled. +type clusterEndpoints struct { + RWHost string + ROHost string + PoolerRWHost string + PoolerROHost string +} + +func resolveClusterEndpoints(cluster *enterprisev4.PostgresCluster, cnpgCluster *cnpgv1.Cluster, namespace string) clusterEndpoints { + // FQDN so consumers in other namespaces can resolve without extra config. + endpoints := clusterEndpoints{ + RWHost: fmt.Sprintf("%s.%s.svc.cluster.local", cnpgCluster.Status.WriteService, namespace), + ROHost: fmt.Sprintf("%s.%s.svc.cluster.local", cnpgCluster.Status.ReadService, namespace), + } + // Pooler service names follow the pattern set by postgrescluster_controller: {cnpgClusterName}-pooler-{rw|ro}. + if cluster.Status.ConnectionPoolerStatus != nil && cluster.Status.ConnectionPoolerStatus.Enabled { + endpoints.PoolerRWHost = fmt.Sprintf("%s-pooler-%s.%s.svc.cluster.local", cnpgCluster.Name, readWriteEndpoint, namespace) + endpoints.PoolerROHost = fmt.Sprintf("%s-pooler-%s.%s.svc.cluster.local", cnpgCluster.Name, readOnlyEndpoint, namespace) + } + return endpoints +} + +// buildDatabaseConfigMapBody is a pure function — no API calls, no decisions about which +// endpoints exist. 
All that is resolved upstream and encoded in endpoints before this is called. +func buildDatabaseConfigMapBody( + dbName string, + endpoints clusterEndpoints, +) map[string]string { + data := map[string]string{ + "dbname": dbName, + "port": postgresPort, + "rw-host": endpoints.RWHost, + "ro-host": endpoints.ROHost, + "admin-user": adminRoleName(dbName), + "rw-user": rwRoleName(dbName), + } + // Pooler keys are only written when pooling is active + if endpoints.PoolerRWHost != "" { + data["pooler-rw-host"] = endpoints.PoolerRWHost + } + if endpoints.PoolerROHost != "" { + data["pooler-ro-host"] = endpoints.PoolerROHost + } + return data +} + +// reconcileRoleConfigMaps mirrors reconcileUserSecrets: checks per-database, +// creates only what is absent. Endpoints are resolved by the caller so this function +// has a single responsibility: iteration and existence-gated creation. +// Orphaned ConfigMaps from a previous retain-deletion are re-adopted. +func (r *PostgresDatabaseReconciler) reconcileRoleConfigMaps( + ctx context.Context, + postgresDB *enterprisev4.PostgresDatabase, + endpoints clusterEndpoints, +) error { + logger := log.FromContext(ctx) + + for _, dbSpec := range postgresDB.Spec.Databases { + cmName := configMapName(postgresDB.Name, dbSpec.Name) + + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Namespace: postgresDB.Namespace, + Labels: map[string]string{ + labelManagedBy: "splunk-operator", + }, + }, + } + + _, err := controllerutil.CreateOrUpdate(ctx, r.Client, cm, func() error { + cm.Data = buildDatabaseConfigMapBody(dbSpec.Name, endpoints) + + // Set ownerRef on creation or re-adoption (orphaned objects have no ownerRef). 
+ reAdopting := cm.Annotations[annotationRetainedFrom] == postgresDB.Name + if reAdopting { + logger.Info("Re-adopting orphaned ConfigMap", "name", cmName) + delete(cm.Annotations, annotationRetainedFrom) + } + if cm.CreationTimestamp.IsZero() || reAdopting { + if err := controllerutil.SetControllerReference(postgresDB, cm, r.Scheme); err != nil { + logger.Error(err, "Failed to set owner reference on ConfigMap", "configmap", cm.Name) + return err + } + } + return nil + }) + if err != nil { + logger.Error(err, "failed to create or update database configmap", "db", postgresDB.Name, "configmap", cmName) + return fmt.Errorf("failed to create or update database configmap %s: %w", cmName, err) + } + } + return nil +} + +// populateDatabaseStatus derives all secret ref names via roleSecretName() — the same function +// used during creation — so status refs are always consistent with actual secret names. +// Recomputing from spec rather than reading live secret names keeps this side-effect free. 
+func populateDatabaseStatus(postgresDB *enterprisev4.PostgresDatabase) []enterprisev4.DatabaseInfo { + databases := make([]enterprisev4.DatabaseInfo, 0, len(postgresDB.Spec.Databases)) + for _, dbSpec := range postgresDB.Spec.Databases { + databases = append(databases, enterprisev4.DatabaseInfo{ + Name: dbSpec.Name, + Ready: true, + AdminUserSecretRef: &corev1.LocalObjectReference{ + Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleAdmin), + }, + RWUserSecretRef: &corev1.LocalObjectReference{ + Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleRW), + }, + ConfigMapRef: &corev1.LocalObjectReference{ + Name: configMapName(postgresDB.Name, dbSpec.Name), + }, + }) + } + return databases +} diff --git a/internal/controller/postgresdatabase_controller_test.go b/internal/controller/postgresdatabase_controller_test.go new file mode 100644 index 000000000..4e0589cad --- /dev/null +++ b/internal/controller/postgresdatabase_controller_test.go @@ -0,0 +1,84 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + enterprisev4 "github.com/splunk/splunk-operator/api/v4" +) + +var _ = Describe("Database Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", // TODO(user):Modify as needed + } + database := &enterprisev4.PostgresDatabase{} + + BeforeEach(func() { + By("creating the custom resource for the Kind Database") + err := k8sClient.Get(ctx, typeNamespacedName, database) + if err != nil && errors.IsNotFound(err) { + resource := &enterprisev4.PostgresDatabase{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + // TODO(user): Specify other spec details if needed. + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + // TODO(user): Cleanup logic after each test, like removing the resource instance. + resource := &enterprisev4.PostgresDatabase{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance Database") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &PostgresDatabaseReconciler{ + Client: k8sClient, + Scheme: k8sClient.Scheme(), + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. + // Example: If you expect a certain status condition after reconciliation, verify it here. 
+ }) + }) +}) diff --git a/internal/controller/postgresoperator_common_types.go b/internal/controller/postgresoperator_common_types.go new file mode 100644 index 000000000..984b51134 --- /dev/null +++ b/internal/controller/postgresoperator_common_types.go @@ -0,0 +1,136 @@ +package controller + +import ( + corev1 "k8s.io/api/core/v1" + "time" +) + +// This struct is used to compare the merged configuration from PostgresClusterClass and PostgresClusterSpec +// in a normalized way, and not to use CNPG-default values which are causing false positive diff state while reconciliation loop. +// It contains only the fields that are relevant for our reconciliation and that we want to compare when deciding whether to update the CNPG Cluster spec or not. +type normalizedCNPGClusterSpec struct { + ImageName string + Instances int + // Parameters we set, instead of complete spec from CNPG + CustomDefinedParameters map[string]string + PgHBA []string + DefaultDatabase string + Owner string + StorageSize string + Resources corev1.ResourceRequirements +} + +type reconcileDBPhases string +type reconcileClusterPhases string +type conditionTypes string +type conditionReasons string +type clusterReadyStatus string +type objectKind string + +const ( + // retryDelay is the default requeue interval when waiting on external state (CNPG, cluster). + retryDelay = time.Second * 15 + deletionPolicyRetain string = "Retain" + + // clusterNotFoundRetryDelay is longer than retryDelay — a missing cluster is unlikely + // to appear in 15 s and hammering the API is wasteful. 
+ clusterNotFoundRetryDelay = time.Second * 30 + // cluster endpoint suffixes + readOnlyEndpoint string = "ro" + readWriteEndpoint string = "rw" + // default database name + defaultDatabaseName string = "postgres" + postgresDatabaseFinalizerName string = "postgresdatabases.enterprise.splunk.com/finalizer" + + annotationRetainedFrom string = "enterprise.splunk.com/retained-from" + + defaultSecretSuffix string = "-secret" + + defaultPoolerSuffix string = "-pooler-" + defaultConfigMapSuffix string = "-configmap" + defaultPort string = "5432" + superUsername string = "postgres" + postgresClusterFinalizerName string = "postgresclusters.enterprise.splunk.com/finalizer" + clusterDeletionPolicyDelete string = "Delete" + clusterDeletionPolicyRetain string = "Retain" + + // phases + readyDBPhase reconcileDBPhases = "Ready" + pendingDBPhase reconcileDBPhases = "Pending" + provisioningDBPhase reconcileDBPhases = "Provisioning" + failedDBPhase reconcileDBPhases = "Failed" + + // cluster phases + readyClusterPhase reconcileClusterPhases = "Ready" + pendingClusterPhase reconcileClusterPhases = "Pending" + provisioningClusterPhase reconcileClusterPhases = "Provisioning" + configuringClusterPhase reconcileClusterPhases = "Configuring" + failedClusterPhase reconcileClusterPhases = "Failed" + + // Condition types + clusterReady conditionTypes = "ClusterReady" + poolerReady conditionTypes = "PoolerReady" + rolesReady conditionTypes = "RolesReady" + databasesReady conditionTypes = "DatabasesReady" + secretsReady conditionTypes = "SecretsReady" + configMapsReady conditionTypes = "ConfigMapsReady" + configMapReady conditionTypes = "ConfigMapReady" + privilegesReady conditionTypes = "PrivilegesReady" + + // Condition reasons + reasonClusterNotFound conditionReasons = "ClusterNotFound" + reasonClusterProvisioning conditionReasons = "ClusterProvisioning" + reasonClusterInfoFetchFailed conditionReasons = "ClusterInfoFetchNotPossible" + reasonClusterAvailable conditionReasons = 
"ClusterAvailable" + reasonDatabasesAvailable conditionReasons = "DatabasesAvailable" + reasonSecretsCreated conditionReasons = "SecretsCreated" + reasonSecretsCreationFailed conditionReasons = "SecretsCreationFailed" + reasonWaitingForCNPG conditionReasons = "WaitingForCNPG" + reasonUsersCreationFailed conditionReasons = "UsersCreationFailed" + reasonUsersAvailable conditionReasons = "UsersAvailable" + reasonRoleConflict conditionReasons = "RoleConflict" + reasonSuperUserSecretFailed conditionReasons = "SuperUserSecretFailed" + reasonConfigMapsCreationFailed conditionReasons = "ConfigMapsCreationFailed" + reasonConfigMapsCreated conditionReasons = "ConfigMapsCreated" + reasonPrivilegesGranted conditionReasons = "PrivilegesGranted" + reasonPrivilegesGrantFailed conditionReasons = "PrivilegesGrantFailed" + + // Additional condition reasons for clusterReady conditionType + reasonClusterClassNotFound conditionReasons = "ClusterClassNotFound" + reasonManagedRolesFailed conditionReasons = "ManagedRolesReconciliationFailed" + reasonClusterBuildFailed conditionReasons = "ClusterBuildFailed" + reasonClusterBuildSucceeded conditionReasons = "ClusterBuildSucceeded" + reasonClusterGetFailed conditionReasons = "ClusterGetFailed" + reasonClusterPatchFailed conditionReasons = "ClusterPatchFailed" + reasonInvalidConfiguration conditionReasons = "InvalidConfiguration" + reasonConfigMapFailed conditionReasons = "ConfigMapReconciliationFailed" + reasonUserSecretFailed conditionReasons = "UserSecretReconciliationFailed" + + // Additional condition reasons for poolerReady conditionType + reasonPoolerReconciliationFailed conditionReasons = "PoolerReconciliationFailed" + reasonPoolerConfigMissing conditionReasons = "PoolerConfigMissing" + reasonPoolerCreating conditionReasons = "PoolerCreating" + reasonAllInstancesReady conditionReasons = "AllInstancesReady" + + // Additional condition reasons for mapping CNPG cluster statuses + reasonCNPGClusterHealthy conditionReasons = 
"CNPGClusterHealthy" + reasonCNPGProvisioning conditionReasons = "CNPGClusterProvisioning" + reasonCNPGSwitchover conditionReasons = "CNPGSwitchover" + reasonCNPGFailingOver conditionReasons = "CNPGFailingOver" + reasonCNPGRestarting conditionReasons = "CNPGRestarting" + reasonCNPGUpgrading conditionReasons = "CNPGUpgrading" + reasonCNPGApplyingConfig conditionReasons = "CNPGApplyingConfiguration" + reasonCNPGPromoting conditionReasons = "CNPGPromoting" + reasonCNPGWaitingForUser conditionReasons = "CNPGWaitingForUser" + reasonCNPGUnrecoverable conditionReasons = "CNPGUnrecoverable" + reasonCNPGProvisioningFailed conditionReasons = "CNPGProvisioningFailed" + reasonCNPGPluginError conditionReasons = "CNPGPluginError" + reasonCNPGImageError conditionReasons = "CNPGImageError" + reasonClusterDeleteFailed conditionReasons = "ClusterDeleteFailed" + + // Cluster status + ClusterNotFound clusterReadyStatus = "NotFound" + ClusterNotReady clusterReadyStatus = "NotReady" + ClusterNoProvisionerRef clusterReadyStatus = "NoProvisionerRef" + ClusterReady clusterReadyStatus = "Ready" +) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index be2c1a50f..94db6a730 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -1,5 +1,5 @@ /* -Copyright (c) 2018-2022 Splunk Inc. All rights reserved. +Copyright 2026. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,151 +18,99 @@ package controller import ( "context" - "fmt" + "os" "path/filepath" "testing" - "time" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" - "go.uber.org/zap/zapcore" + + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - clientgoscheme "k8s.io/client-go/kubernetes/scheme" - ctrl "sigs.k8s.io/controller-runtime" - - enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - //+kubebuilder:scaffold:imports + enterprisev4 "github.com/splunk/splunk-operator/api/v4" + // +kubebuilder:scaffold:imports ) -var cfg *rest.Config -var k8sClient client.Client -var testEnv *envtest.Environment -var k8sManager ctrl.Manager +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. -func TestAPIs(t *testing.T) { +var ( + ctx context.Context + cancel context.CancelFunc + testEnv *envtest.Environment + cfg *rest.Config + k8sClient client.Client +) + +func TestControllers(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Controller Suite") - } -var _ = BeforeSuite(func(ctx context.Context) { - opts := zap.Options{ - Development: true, - TimeEncoder: zapcore.RFC3339NanoTimeEncoder, - } - logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), zap.UseFlagOptions(&opts))) +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) - By("bootstrapping test environment") + ctx, cancel = context.WithCancel(context.TODO()) + + var err error + err = enterprisev4.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // +kubebuilder:scaffold:scheme + By("bootstrapping test environment") testEnv = &envtest.Environment{ CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, ErrorIfCRDPathMissing: true, } - var err error + // Retrieve the first found binary directory to allow 
running tests from IDEs + if getFirstFoundEnvTestBinaryDir() != "" { + testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir() + } // cfg is defined in this file globally. cfg, err = testEnv.Start() Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) - err = enterpriseApi.AddToScheme(clientgoscheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = enterpriseApiV3.AddToScheme(clientgoscheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = enterpriseApi.AddToScheme(clientgoscheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = enterpriseApi.AddToScheme(clientgoscheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = enterpriseApiV3.AddToScheme(clientgoscheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - err = enterpriseApi.AddToScheme(clientgoscheme.Scheme) - Expect(err).NotTo(HaveOccurred()) - - //+kubebuilder:scaffold:scheme - - // Create New Manager for controller - k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{ - Scheme: clientgoscheme.Scheme, - }) - Expect(err).ToNot(HaveOccurred()) - if err := (&ClusterManagerReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } - if err := (&ClusterMasterReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } - if err := (&IndexerClusterReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } - if err := (&LicenseManagerReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } - if err := (&LicenseMasterReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - 
Expect(err).NotTo(HaveOccurred()) - } - if err := (&MonitoringConsoleReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } - if err := (&SearchHeadClusterReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } - if err := (&StandaloneReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } - - go func() { - err = k8sManager.Start(ctrl.SetupSignalHandler()) - fmt.Printf("error %v", err.Error()) - Expect(err).ToNot(HaveOccurred()) - }() - - Expect(err).ToNot(HaveOccurred()) - - k8sClient, err = client.New(cfg, client.Options{Scheme: clientgoscheme.Scheme}) + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) - -}, NodeTimeout(time.Second*500)) +}) var _ = AfterSuite(func() { By("tearing down the test environment") - testEnv.Stop() + cancel() + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) }) + +// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path. +// ENVTEST-based tests depend on specific binaries, usually located in paths set by +// controller-runtime. When running tests directly (e.g., via an IDE) without using +// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured. +// +// This function streamlines the process by finding the required binaries, similar to +// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are +// properly set up, run 'make setup-envtest' beforehand. 
+func getFirstFoundEnvTestBinaryDir() string { + basePath := filepath.Join("..", "..", "bin", "k8s") + entries, err := os.ReadDir(basePath) + if err != nil { + logf.Log.Error(err, "Failed to read directory", "path", basePath) + return "" + } + for _, entry := range entries { + if entry.IsDir() { + return filepath.Join(basePath, entry.Name()) + } + } + return "" +} diff --git a/pkg/splunk/common/names.go b/pkg/splunk/common/names.go index b0f7e94ca..c232f6b45 100644 --- a/pkg/splunk/common/names.go +++ b/pkg/splunk/common/names.go @@ -108,6 +108,9 @@ const ( // MockClientInduceErrorDelete represents an error for delete Api MockClientInduceErrorDelete = "mockClientDeleteError" + // MockClientInduceErrorApply represents an error for apply Api + MockClientInduceErrorApply = "mockClientApplyError" + // Rerr represents a random error strting Rerr = "randomError" ) diff --git a/pkg/splunk/test/controller.go b/pkg/splunk/test/controller.go index 6e5871cc4..ab26b7bfa 100644 --- a/pkg/splunk/test/controller.go +++ b/pkg/splunk/test/controller.go @@ -391,6 +391,21 @@ func (c MockClient) Create(ctx context.Context, obj client.Object, opts ...clien return nil } +// Apply applies the given apply configuration to the mock client's state +func (c MockClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error { + // Check for induced errors + if value, ok := c.InduceErrorKind[splcommon.MockClientInduceErrorApply]; ok && value != nil { + return value + } + c.Calls["Apply"] = append(c.Calls["Apply"], MockFuncCall{ + CTX: ctx, + // Note: obj is ApplyConfiguration interface, not client.Object + // For mock purposes, we just record the call + }) + // For mock purposes, we treat Apply similar to Update/Create + return nil +} + // Delete returns mock client's Err field func (c MockClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { // Check for induced errors diff --git a/pkg/splunk/util/util.go 
b/pkg/splunk/util/util.go index a393d7703..d9b2f095d 100644 --- a/pkg/splunk/util/util.go +++ b/pkg/splunk/util/util.go @@ -170,7 +170,7 @@ func PodExecCommand(ctx context.Context, c splcommon.ControllerClient, podName s return "", "", err } } - restClient, err := podExecRESTClientForGVK(gvk, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) + restClient, err := podExecRESTClientForGVK(gvk, false, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) if err != nil { return "", "", err } diff --git a/pkg/splunk/util/util_test.go b/pkg/splunk/util/util_test.go index 5e61f1676..6fc39007f 100644 --- a/pkg/splunk/util/util_test.go +++ b/pkg/splunk/util/util_test.go @@ -48,7 +48,7 @@ var fakePodExecGetConfig = func() (*rest.Config, error) { } // Faking RESTClientForGVK -var fakePodExecRESTClientForGVK = func(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory, client *http.Client) (rest.Interface, error) { +var fakePodExecRESTClientForGVK = func(gvk schema.GroupVersionKind, forceDisableProtoBuf bool, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory, client *http.Client) (rest.Interface, error) { return &fakeRestInterface{}, errors.New("fakeerror") } diff --git a/test/connect-to-postgres-cluster.sh b/test/connect-to-postgres-cluster.sh new file mode 100755 index 000000000..5f45e92d2 --- /dev/null +++ b/test/connect-to-postgres-cluster.sh @@ -0,0 +1,121 @@ +#!/bin/bash +# filepath: scripts/test-postgres-connection.sh + +set -e + +# Color output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Default values +NAMESPACE="${NAMESPACE:-default}" +POSTGRES_CLUSTER_NAME="${1:-}" + +if [ -z "$POSTGRES_CLUSTER_NAME" ]; then + echo -e "${RED}Error: PostgresCluster name is required${NC}" + echo "Usage: $0 [namespace]" + echo "Example: $0 my-postgres-cluster default" + exit 1 +fi + +if [ -n "$2" ]; then + 
NAMESPACE="$2" +fi + +echo -e "${YELLOW}Connecting to PostgresCluster: $POSTGRES_CLUSTER_NAME in namespace: $NAMESPACE${NC}" + +# Get ConfigMap name from PostgresCluster status +CONFIGMAP_NAME=$(kubectl get postgrescluster "$POSTGRES_CLUSTER_NAME" -n "$NAMESPACE" \ + -o jsonpath='{.status.resources.configMapRef.name}' 2>/dev/null) + +if [ -z "$CONFIGMAP_NAME" ]; then + echo -e "${RED}Error: ConfigMap reference not found in PostgresCluster status${NC}" + echo "Make sure the PostgresCluster is ready and the ConfigMap has been created" + exit 1 +fi + +# Get Secret name from PostgresCluster status +SECRET_NAME=$(kubectl get postgrescluster "$POSTGRES_CLUSTER_NAME" -n "$NAMESPACE" \ + -o jsonpath='{.status.resources.secretRef.name}' 2>/dev/null) + +if [ -z "$SECRET_NAME" ]; then + echo -e "${RED}Error: Secret reference not found in PostgresCluster status${NC}" + echo "Make sure the PostgresCluster is ready and the Secret has been created" + exit 1 +fi + +echo -e "${GREEN}Found ConfigMap: $CONFIGMAP_NAME${NC}" +echo -e "${GREEN}Found Secret: $SECRET_NAME${NC}" + +# Extract connection details from ConfigMap (using correct uppercase keys) +echo -e "\n${YELLOW}Extracting connection details...${NC}" +DB_PORT=$(kubectl get configmap "$CONFIGMAP_NAME" -n "$NAMESPACE" -o jsonpath='{.data.DEFAULT_CLUSTER_PORT}') +DB_USER=$(kubectl get configmap "$CONFIGMAP_NAME" -n "$NAMESPACE" -o jsonpath='{.data.SUPER_USER_NAME}') +RW_SERVICE_FQDN=$(kubectl get configmap "$CONFIGMAP_NAME" -n "$NAMESPACE" -o jsonpath='{.data.CLUSTER_RW_ENDPOINT}') +RO_SERVICE_FQDN=$(kubectl get configmap "$CONFIGMAP_NAME" -n "$NAMESPACE" -o jsonpath='{.data.CLUSTER_RO_ENDPOINT}') +R_SERVICE_FQDN=$(kubectl get configmap "$CONFIGMAP_NAME" -n "$NAMESPACE" -o jsonpath='{.data.CLUSTER_R_ENDPOINT}') + +# Extract just the service name (first part before the dot) +RW_SERVICE=$(echo "$RW_SERVICE_FQDN" | cut -d'.' -f1) +RO_SERVICE=$(echo "$RO_SERVICE_FQDN" | cut -d'.' -f1) +R_SERVICE=$(echo "$R_SERVICE_FQDN" | cut -d'.' 
-f1) + +# Extract password from Secret +DB_PASSWORD=$(kubectl get secret "$SECRET_NAME" -n "$NAMESPACE" -o jsonpath='{.data.password}' | base64 -d) + +# Get database name from CNPG cluster (assuming it matches the PostgresCluster name or is 'app') +DB_NAME=$(kubectl get cluster "$POSTGRES_CLUSTER_NAME" -n "$NAMESPACE" -o jsonpath='{.spec.bootstrap.initdb.database}' 2>/dev/null || echo "postgres") + +echo -e "${GREEN}Connection Details:${NC}" +echo " RW Service: $RW_SERVICE_FQDN" +echo " RO Service: $RO_SERVICE_FQDN" +echo " R Service: $R_SERVICE_FQDN" +echo " Port: $DB_PORT" +echo " Database: $DB_NAME" +echo " User: $DB_USER" + +# Check if psql is installed +if ! command -v psql &> /dev/null; then + echo -e "\n${YELLOW}psql client not found. Using kubectl run with postgres image...${NC}" + + echo -e "${YELLOW}Creating temporary pod for connection test...${NC}" + + kubectl run postgres-client-test \ + --rm -i --tty \ + --image=postgres:16 \ + --restart=Never \ + --namespace="$NAMESPACE" \ + --env="PGPASSWORD=$DB_PASSWORD" \ + -- psql -h "$RW_SERVICE_FQDN" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" +else + # Use port-forward for local connection + echo -e "\n${YELLOW}Setting up port-forward to PostgreSQL service...${NC}" + + # Kill any existing port-forward on 5432 + pkill -f "kubectl.*port-forward.*$RW_SERVICE" 2>/dev/null || true + + # Start port-forward in background (use service name only, not FQDN) + kubectl port-forward -n "$NAMESPACE" "service/$RW_SERVICE" 5432:$DB_PORT > /dev/null 2>&1 & + PORT_FORWARD_PID=$! 
+ + # Cleanup function + cleanup() { + echo -e "\n${YELLOW}Cleaning up port-forward...${NC}" + kill $PORT_FORWARD_PID 2>/dev/null || true + } + trap cleanup EXIT + + # Wait for port-forward to be ready + echo -e "${YELLOW}Waiting for port-forward to be ready...${NC}" + sleep 3 + + echo -e "${GREEN}Connecting to PostgreSQL...${NC}" + echo -e "${YELLOW}Password: $DB_PASSWORD${NC}\n" + + # Use connection string format which is more reliable + # Disable GSSAPI and use password authentication only + PGPASSWORD="$DB_PASSWORD" psql "postgresql://$DB_USER@localhost:5432/$DB_NAME?gssencmode=disable" \ + || PGPASSWORD="$DB_PASSWORD" psql -h localhost -p 5432 -U "$DB_USER" -d "$DB_NAME" --no-psqlrc +fi \ No newline at end of file diff --git a/test/postgrescluster-retain-upgrade-flow.sh b/test/postgrescluster-retain-upgrade-flow.sh new file mode 100755 index 000000000..69124c536 --- /dev/null +++ b/test/postgrescluster-retain-upgrade-flow.sh @@ -0,0 +1,356 @@ +#!/usr/bin/env bash +# run make install make run in a separate terminal to have the operator running while this test executes +# this test verifies that when a PostgresCluster with clusterDeletionPolicy=Retain is deleted, the underlying CNPG Cluster and superuser Secret are not deleted and can be re-attached to a new PostgresCluster with the same name (simulating a major version upgrade flow where the cluster needs to be recreated). +# then, in a separate terminal, run: NAMESPACE=your-namespace UPGRADE_POSTGRES_VERSION=16 ./test/postgrescluster-retain-upgrade-flow.sh + + +set -euo pipefail + +ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd)" +TEST_DIR="$ROOT_DIR/test" +SAMPLES_DIR="$ROOT_DIR/config/samples" + +CLUSTER_MANIFEST="${CLUSTER_MANIFEST:-$SAMPLES_DIR/enterprise_v4_postgrescluster_dev.yaml}" +DATABASE_MANIFEST="${DATABASE_MANIFEST:-$SAMPLES_DIR/enterprise_v4_postgresdatabase.yaml}" +CONNECT_SCRIPT="${CONNECT_SCRIPT:-$TEST_DIR/connect-to-postgres-cluster.sh}" +UPGRADE_POSTGRES_VERSION="${UPGRADE_POSTGRES_VERSION:-16}" +POLL_INTERVAL="${POLL_INTERVAL:-5}" +TIMEOUT_SECONDS="${TIMEOUT_SECONDS:-900}" +REQUIRE_POSTGRESDATABASE_READY="${REQUIRE_POSTGRESDATABASE_READY:-0}" + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +log() { + echo -e "${YELLOW}[$(date '+%Y-%m-%d %H:%M:%S')] $*${NC}" +} + +pass() { + echo -e "${GREEN}[PASS] $*${NC}" +} + +fail() { + echo -e "${RED}[FAIL] $*${NC}" >&2 + exit 1 +} + +require_file() { + local path="$1" + [[ -f "$path" ]] || fail "Required file not found: $path" +} + +require_command() { + local cmd="$1" + command -v "$cmd" >/dev/null 2>&1 || fail "Required command not found: $cmd" +} + +current_namespace() { + local ns + ns="$(kubectl config view --minify --output 'jsonpath={..namespace}' 2>/dev/null || true)" + if [[ -z "$ns" ]]; then + ns="default" + fi + printf '%s' "$ns" +} + +preflight_namespace() { + local deletion_ts phase + deletion_ts="$(kubectl get ns "$NAMESPACE" -o jsonpath='{.metadata.deletionTimestamp}' 2>/dev/null || true)" + phase="$(kubectl get ns "$NAMESPACE" -o jsonpath='{.status.phase}' 2>/dev/null || true)" + if [[ -n "$deletion_ts" || "$phase" == "Terminating" ]]; then + fail "Namespace $NAMESPACE is terminating (deletionTimestamp=$deletion_ts phase=$phase). Use a non-terminating namespace." 
+ fi +} + +preflight_cluster_dns() { + local host + host="${CLUSTER_NAME}-rw.${NAMESPACE}.svc.cluster.local" + if getent hosts "$host" >/dev/null 2>&1; then + return 0 + fi + + log "Cluster DNS name is not resolvable from this machine: $host" + log "This does not block local connection tests (we use kubectl port-forward), but it blocks PostgresDatabase DB-connection/privilege phases when the operator runs out-of-cluster (make run)." + log "Fix: run the operator in-cluster or use telepresence/kubefwd to get cluster DNS/networking on your machine." + + SKIP_POSTGRESDATABASE_READY_CHECK=1 + if [[ "$REQUIRE_POSTGRESDATABASE_READY" == "1" ]]; then + fail "PostgresDatabase readiness required (REQUIRE_POSTGRESDATABASE_READY=1) but cluster DNS is not available." + fi + + log "Continuing with degraded PostgresDatabase checks (readiness will not be required)." +} + +resource_exists() { + local resource="$1" + local name="$2" + kubectl get "$resource" "$name" -n "$NAMESPACE" >/dev/null 2>&1 +} + +jsonpath_value() { + local resource="$1" + local name="$2" + local jsonpath="$3" + kubectl get "$resource" "$name" -n "$NAMESPACE" -o "jsonpath=${jsonpath}" 2>/dev/null +} + +wait_for_jsonpath() { + local resource="$1" + local name="$2" + local jsonpath="$3" + local expected="$4" + local timeout="${5:-$TIMEOUT_SECONDS}" + local deadline=$((SECONDS + timeout)) + local value="" + + while (( SECONDS < deadline )); do + value="$(jsonpath_value "$resource" "$name" "$jsonpath" || true)" + if [[ "$value" == "$expected" ]]; then + pass "$resource/$name reached ${jsonpath}=${expected}" + return 0 + fi + sleep "$POLL_INTERVAL" + done + + fail "Timed out waiting for $resource/$name to reach ${jsonpath}=${expected}. 
Last value: ${value:-}" +} + +wait_for_contains() { + local resource="$1" + local name="$2" + local jsonpath="$3" + local expected_substring="$4" + local timeout="${5:-$TIMEOUT_SECONDS}" + local deadline=$((SECONDS + timeout)) + local value="" + + while (( SECONDS < deadline )); do + value="$(jsonpath_value "$resource" "$name" "$jsonpath" || true)" + if [[ "$value" == *"$expected_substring"* ]]; then + pass "$resource/$name contains ${expected_substring} in ${jsonpath}" + return 0 + fi + sleep "$POLL_INTERVAL" + done + + fail "Timed out waiting for $resource/$name to contain ${expected_substring} in ${jsonpath}. Last value: ${value:-}" +} + +wait_for_absence() { + local resource="$1" + local name="$2" + local timeout="${3:-$TIMEOUT_SECONDS}" + local deadline=$((SECONDS + timeout)) + + while (( SECONDS < deadline )); do + if ! resource_exists "$resource" "$name"; then + pass "$resource/$name is absent" + return 0 + fi + sleep "$POLL_INTERVAL" + done + + fail "Timed out waiting for $resource/$name to be deleted" +} + +wait_for_presence() { + local resource="$1" + local name="$2" + local timeout="${3:-$TIMEOUT_SECONDS}" + local deadline=$((SECONDS + timeout)) + + while (( SECONDS < deadline )); do + if resource_exists "$resource" "$name"; then + pass "$resource/$name exists" + return 0 + fi + sleep "$POLL_INTERVAL" + done + + fail "Timed out waiting for $resource/$name to exist" +} + +wait_for_owner_reference() { + local resource="$1" + local name="$2" + local owner_kind="$3" + local owner_name="$4" + local owner_uid="$5" + local timeout="${6:-$TIMEOUT_SECONDS}" + local deadline=$((SECONDS + timeout)) + local owners="" + local expected="${owner_kind}:${owner_name}:${owner_uid}" + + while (( SECONDS < deadline )); do + owners="$(jsonpath_value "$resource" "$name" '{range .metadata.ownerReferences[*]}{.kind}:{.name}:{.uid}{"\n"}{end}' || true)" + if [[ "$owners" == *"$expected"* ]]; then + pass "$resource/$name is owned by ${owner_kind}/${owner_name}" + return 0 + fi + 
sleep "$POLL_INTERVAL" + done + + fail "Timed out waiting for $resource/$name to be owned by ${owner_kind}/${owner_name}. Owners: ${owners:-}" +} + +run_connection_check() { + log "Checking superuser connection with $CONNECT_SCRIPT" + printf 'SELECT current_user;\n\\q\n' | bash "$CONNECT_SCRIPT" "$CLUSTER_NAME" "$NAMESPACE" + pass "Superuser connection succeeded" +} + +patch_cluster() { + local deletion_policy="$1" + local pooler_enabled="$2" + kubectl patch postgrescluster "$CLUSTER_NAME" -n "$NAMESPACE" --type merge \ + -p "{\"spec\":{\"clusterDeletionPolicy\":\"${deletion_policy}\",\"connectionPoolerEnabled\":${pooler_enabled}}}" >/dev/null +} + +apply_upgraded_cluster_manifest() { + local tmp_manifest + tmp_manifest="$(mktemp)" + + sed \ + -e "s/^\([[:space:]]*clusterDeletionPolicy:\).*/\1 Retain/" \ + -e "s/^\([[:space:]]*postgresVersion:\).*/\1 \"${UPGRADE_POSTGRES_VERSION}\"/" \ + "$CLUSTER_MANIFEST" > "$tmp_manifest" + + kubectl apply -n "$NAMESPACE" -f "$tmp_manifest" >/dev/null + rm -f "$tmp_manifest" +} + +assert_cluster_ready() { + wait_for_jsonpath postgrescluster "$CLUSTER_NAME" '{.status.phase}' 'Ready' + wait_for_jsonpath postgrescluster "$CLUSTER_NAME" '{.status.conditions[?(@.type=="ClusterReady")].status}' 'True' + wait_for_jsonpath postgrescluster "$CLUSTER_NAME" '{.status.conditions[?(@.type=="ConfigMapReady")].status}' 'True' +} + +assert_database_created() { + wait_for_presence postgresdatabase "$DATABASE_NAME" + for db in "${DATABASES[@]}"; do + wait_for_presence databases.postgresql.cnpg.io "${DATABASE_NAME}-${db}" + done + pass "PostgresDatabase CR exists and CNPG Database CRs are present" +} + +assert_database_ready() { + if [[ "${SKIP_POSTGRESDATABASE_READY_CHECK:-0}" == "1" ]]; then + assert_database_created + return 0 + fi + wait_for_jsonpath postgresdatabase "$DATABASE_NAME" '{.status.phase}' 'Ready' + wait_for_jsonpath postgresdatabase "$DATABASE_NAME" '{.status.observedGeneration}' \ + "$(jsonpath_value postgresdatabase 
"$DATABASE_NAME" '{.metadata.generation}')" +} + +record_cluster_artifacts() { + SUPERUSER_SECRET_NAME="$(jsonpath_value postgrescluster "$CLUSTER_NAME" '{.status.resources.secretRef.name}')" + CONFIGMAP_NAME="$(jsonpath_value postgrescluster "$CLUSTER_NAME" '{.status.resources.configMapRef.name}')" + + [[ -n "$SUPERUSER_SECRET_NAME" ]] || fail "PostgresCluster status.resources.secretRef.name is empty" + [[ -n "$CONFIGMAP_NAME" ]] || fail "PostgresCluster status.resources.configMapRef.name is empty" +} + +cleanup_database_cr() { + if resource_exists postgresdatabase "$DATABASE_NAME"; then + log "Deleting PostgresDatabase/$DATABASE_NAME to leave the namespace clean" + kubectl delete postgresdatabase "$DATABASE_NAME" -n "$NAMESPACE" --wait=false >/dev/null + wait_for_absence postgresdatabase "$DATABASE_NAME" + fi +} + +require_command kubectl +require_file "$CLUSTER_MANIFEST" +require_file "$DATABASE_MANIFEST" +require_file "$CONNECT_SCRIPT" + +NAMESPACE="${NAMESPACE:-$(current_namespace)}" +CLUSTER_NAME="${CLUSTER_NAME:-$(kubectl create --dry-run=client -f "$CLUSTER_MANIFEST" -o jsonpath='{.metadata.name}')}" +DATABASE_NAME="${DATABASE_NAME:-$(kubectl create --dry-run=client -f "$DATABASE_MANIFEST" -o jsonpath='{.metadata.name}')}" +DATABASES_STR="$(kubectl create --dry-run=client -f "$DATABASE_MANIFEST" -o jsonpath='{range .spec.databases[*]}{.name}{" "}{end}')" +read -r -a DATABASES <<< "${DATABASES_STR:-}" +RW_POOLER_NAME="${CLUSTER_NAME}-pooler-rw" +RO_POOLER_NAME="${CLUSTER_NAME}-pooler-ro" + +log "Using namespace: $NAMESPACE" +log "Cluster manifest: $CLUSTER_MANIFEST" +log "Database manifest: $DATABASE_MANIFEST" +log "Upgrade target postgresVersion: $UPGRADE_POSTGRES_VERSION" + +preflight_namespace +preflight_cluster_dns + +log "1. Creating PostgresCluster from sample manifest" +kubectl apply -n "$NAMESPACE" -f "$CLUSTER_MANIFEST" + +log "2. Creating PostgresDatabase from sample manifest" +kubectl apply -n "$NAMESPACE" -f "$DATABASE_MANIFEST" + +log "3. 
Waiting for PostgresCluster and PostgresDatabase to become ready" +assert_cluster_ready +assert_database_ready +record_cluster_artifacts +pass "PostgresCluster and PostgresDatabase were created successfully" + +log "4. Verifying superuser connection to PostgresCluster" +run_connection_check + +log "5. Setting clusterDeletionPolicy=Retain and connectionPoolerEnabled=false" +patch_cluster "Retain" "false" +wait_for_jsonpath postgrescluster "$CLUSTER_NAME" '{.spec.clusterDeletionPolicy}' 'Retain' +wait_for_jsonpath postgrescluster "$CLUSTER_NAME" '{.spec.connectionPoolerEnabled}' 'false' +wait_for_absence pooler.postgresql.cnpg.io "$RW_POOLER_NAME" +wait_for_absence pooler.postgresql.cnpg.io "$RO_POOLER_NAME" +assert_cluster_ready + +log "6. Setting connectionPoolerEnabled=true and waiting for poolers" +patch_cluster "Retain" "true" +wait_for_jsonpath postgrescluster "$CLUSTER_NAME" '{.spec.connectionPoolerEnabled}' 'true' +wait_for_presence pooler.postgresql.cnpg.io "$RW_POOLER_NAME" +wait_for_presence pooler.postgresql.cnpg.io "$RO_POOLER_NAME" +wait_for_jsonpath postgrescluster "$CLUSTER_NAME" '{.status.conditions[?(@.type=="PoolerReady")].status}' 'True' +assert_cluster_ready + +log "7. Deleting PostgresCluster with retention enabled" +kubectl delete postgrescluster "$CLUSTER_NAME" -n "$NAMESPACE" --wait=false >/dev/null +wait_for_absence postgrescluster "$CLUSTER_NAME" +wait_for_presence cluster.postgresql.cnpg.io "$CLUSTER_NAME" +wait_for_presence secret "$SUPERUSER_SECRET_NAME" +pass "CNPG cluster and superuser secret remained after PostgresCluster deletion" + +log "8. Recreating PostgresCluster with a major version upgrade" +apply_upgraded_cluster_manifest +wait_for_presence postgrescluster "$CLUSTER_NAME" +wait_for_contains cluster.postgresql.cnpg.io "$CLUSTER_NAME" '{.spec.imageName}' ":${UPGRADE_POSTGRES_VERSION}" +assert_cluster_ready +record_cluster_artifacts + +log "9. 
Checking that retained resources were re-attached to the recreated PostgresCluster" +POSTGRES_CLUSTER_UID="$(jsonpath_value postgrescluster "$CLUSTER_NAME" '{.metadata.uid}')" +wait_for_owner_reference cluster.postgresql.cnpg.io "$CLUSTER_NAME" "PostgresCluster" "$CLUSTER_NAME" "$POSTGRES_CLUSTER_UID" +wait_for_owner_reference secret "$SUPERUSER_SECRET_NAME" "PostgresCluster" "$CLUSTER_NAME" "$POSTGRES_CLUSTER_UID" + +log "10. Verifying superuser connection after recreate/upgrade" +run_connection_check + +log "11. Setting clusterDeletionPolicy=Delete" +kubectl patch postgrescluster "$CLUSTER_NAME" -n "$NAMESPACE" --type merge \ + -p '{"spec":{"clusterDeletionPolicy":"Delete"}}' >/dev/null +wait_for_jsonpath postgrescluster "$CLUSTER_NAME" '{.spec.clusterDeletionPolicy}' 'Delete' + +log "12. Deleting the PostgresCluster" +kubectl delete postgrescluster "$CLUSTER_NAME" -n "$NAMESPACE" --wait=false >/dev/null +wait_for_absence postgrescluster "$CLUSTER_NAME" + +log "13. Checking that no cluster leftovers remain" +cleanup_database_cr +wait_for_absence cluster.postgresql.cnpg.io "$CLUSTER_NAME" +wait_for_absence pooler.postgresql.cnpg.io "$RW_POOLER_NAME" +wait_for_absence pooler.postgresql.cnpg.io "$RO_POOLER_NAME" +wait_for_absence secret "$SUPERUSER_SECRET_NAME" +wait_for_absence configmap "$CONFIGMAP_NAME" +pass "No PostgresCluster leftovers remain in namespace $NAMESPACE" + +log "Flow finished successfully" diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index 85e753a84..d4d4de337 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -217,7 +217,7 @@ func (d *Deployment) PodExecCommand(ctx context.Context, podName string, cmd []s return "", "", err } //FIXME - restClient, err := apiutil.RESTClientForGVK(gvk, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) + restClient, err := apiutil.RESTClientForGVK(gvk, false, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), 
http.DefaultClient) if err != nil { return "", "", err } @@ -264,7 +264,7 @@ func (d *Deployment) OperatorPodExecCommand(ctx context.Context, podName string, return "", "", err } //FIXME - restClient, err := apiutil.RESTClientForGVK(gvk, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) + restClient, err := apiutil.RESTClientForGVK(gvk, false, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) if err != nil { return "", "", err } diff --git a/test/testenv/ingest_utils.go b/test/testenv/ingest_utils.go index 2c0403b1e..d4606ef31 100644 --- a/test/testenv/ingest_utils.go +++ b/test/testenv/ingest_utils.go @@ -187,7 +187,7 @@ func CopyFileToPod(ctx context.Context, podName string, srcPath string, destPath if err != nil { return "", "", err } - restClient, err := apiutil.RESTClientForGVK(gvk, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) + restClient, err := apiutil.RESTClientForGVK(gvk, false, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) if err != nil { return "", "", err }