diff --git a/api/v4/postgrescluster_types.go b/api/v4/postgrescluster_types.go index 4464eaf1b..6ddb14c9d 100644 --- a/api/v4/postgrescluster_types.go +++ b/api/v4/postgrescluster_types.go @@ -30,15 +30,14 @@ type ManagedRole struct { // +kubebuilder:validation:MaxLength=63 Name string `json:"name"` - // PasswordSecretRef references a Secret containing the password for this role. - // The Secret should have a key "password" with the password value. + // PasswordSecretRef references a Secret and the key within it containing the password for this role. // +optional - PasswordSecretRef *corev1.LocalObjectReference `json:"passwordSecretRef,omitempty"` + PasswordSecretRef *corev1.SecretKeySelector `json:"passwordSecretRef,omitempty"` - // Ensure controls whether the role should exist (present) or not (absent). - // +kubebuilder:validation:Enum=present;absent - // +kubebuilder:default=present - Ensure string `json:"ensure,omitempty"` + // Exists controls whether the role should be present (true) or absent (false) in PostgreSQL. + // +kubebuilder:default=true + // +optional + Exists bool `json:"exists,omitempty"` } // PostgresClusterSpec defines the desired state of PostgresCluster. @@ -113,7 +112,7 @@ type PostgresClusterSpec struct { // +kubebuilder:validation:Enum=Delete;Retain // +kubebuilder:default=Retain // +optional - ClusterDeletionPolicy string `json:"clusterDeletionPolicy,omitempty"` + ClusterDeletionPolicy *string `json:"clusterDeletionPolicy,omitempty"` } // PostgresClusterResources defines references to Kubernetes resources related to the PostgresCluster, such as ConfigMaps and Secrets. @@ -123,10 +122,8 @@ type PostgresClusterResources struct { // +optional ConfigMapRef *corev1.LocalObjectReference `json:"configMapRef,omitempty"` - // SecretRef references the Secret with superuser credentials. 
- // Contains: passwords for superuser // +optional - SecretRef *corev1.LocalObjectReference `json:"secretRef,omitempty"` + SuperUserSecretRef *corev1.SecretKeySelector `json:"secretRef,omitempty"` } // PostgresClusterStatus defines the observed state of PostgresCluster. @@ -134,7 +131,7 @@ type PostgresClusterStatus struct { // Phase represents the current phase of the PostgresCluster. // Values: "Pending", "Provisioning", "Failed", "Ready", "Deleting" // +optional - Phase string `json:"phase,omitempty"` + Phase *string `json:"phase,omitempty"` // Conditions represent the latest available observations of the PostgresCluster's state. // +optional diff --git a/api/v4/postgresclusterclass_types.go b/api/v4/postgresclusterclass_types.go index 92d4a2b24..9945ec669 100644 --- a/api/v4/postgresclusterclass_types.go +++ b/api/v4/postgresclusterclass_types.go @@ -22,6 +22,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +// +kubebuilder:validation:XValidation:rule="!has(self.cnpg) || self.provisioner == 'postgresql.cnpg.io'",message="cnpg config can only be set when provisioner is postgresql.cnpg.io" // +kubebuilder:validation:XValidation:rule="!has(self.config) || !has(self.config.connectionPoolerEnabled) || !self.config.connectionPoolerEnabled || (has(self.cnpg) && has(self.cnpg.connectionPooler))",message="cnpg.connectionPooler must be set when config.connectionPoolerEnabled is true" // PostgresClusterClassSpec defines the desired state of PostgresClusterClass. // PostgresClusterClass is immutable after creation - it serves as a template for Cluster CRs. @@ -37,7 +38,7 @@ type PostgresClusterClassSpec struct { // Can be overridden in PostgresCluster CR. // +kubebuilder:default={} // +optional - Config PosgresClusterClassConfig `json:"config,omitempty"` + Config *PostgresClusterClassConfig `json:"config,omitempty"` // CNPG contains CloudNativePG-specific configuration and policies. 
// Only used when Provisioner is "postgresql.cnpg.io" @@ -46,9 +47,9 @@ type PostgresClusterClassSpec struct { CNPG *CNPGConfig `json:"cnpg,omitempty"` } -// PosgresClusterClassConfig contains provider-agnostic cluster configuration. +// PostgresClusterClassConfig contains provider-agnostic cluster configuration. // These fields define PostgresCluster infrastructure and can be overridden in PostgresCluster CR. -type PosgresClusterClassConfig struct { +type PostgresClusterClassConfig struct { // Instances is the number of database instances (1 primary + N replicas). // Single instance (1) is suitable for development. // High availability requires at least 3 instances (1 primary + 2 replicas). @@ -149,7 +150,7 @@ type CNPGConfig struct { // +kubebuilder:validation:Enum=restart;switchover // +kubebuilder:default=switchover // +optional - PrimaryUpdateMethod string `json:"primaryUpdateMethod,omitempty"` + PrimaryUpdateMethod *string `json:"primaryUpdateMethod,omitempty"` // ConnectionPooler contains PgBouncer connection pooler configuration. // When enabled, creates RW and RO pooler deployments for clusters using this class. @@ -166,7 +167,7 @@ type PostgresClusterClassStatus struct { // Phase represents the current phase of the PostgresClusterClass. // Valid phases: "Ready", "Invalid" // +optional - Phase string `json:"phase,omitempty"` + Phase *string `json:"phase,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v4/postgresdatabase_types.go b/api/v4/postgresdatabase_types.go index 4a4a280f0..edab619b0 100644 --- a/api/v4/postgresdatabase_types.go +++ b/api/v4/postgresdatabase_types.go @@ -24,6 +24,7 @@ import ( // PostgresDatabaseSpec defines the desired state of PostgresDatabase. 
// +kubebuilder:validation:XValidation:rule="self.clusterRef == oldSelf.clusterRef",message="clusterRef is immutable" type PostgresDatabaseSpec struct { + // Reference to Postgres Cluster managed by postgresCluster controller // +kubebuilder:validation:Required ClusterRef corev1.LocalObjectReference `json:"clusterRef"` @@ -48,22 +49,22 @@ type DatabaseInfo struct { Name string `json:"name"` Ready bool `json:"ready"` DatabaseRef *corev1.LocalObjectReference `json:"databaseRef,omitempty"` - AdminUserSecretRef *corev1.LocalObjectReference `json:"adminUserSecretRef,omitempty"` - RWUserSecretRef *corev1.LocalObjectReference `json:"rwUserSecretRef,omitempty"` + AdminUserSecretRef *corev1.SecretKeySelector `json:"adminUserSecretRef,omitempty"` + RWUserSecretRef *corev1.SecretKeySelector `json:"rwUserSecretRef,omitempty"` ConfigMapRef *corev1.LocalObjectReference `json:"configMap,omitempty"` } // PostgresDatabaseStatus defines the observed state of PostgresDatabase. type PostgresDatabaseStatus struct { // +optional - Phase string `json:"phase,omitempty"` + Phase *string `json:"phase,omitempty"` // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` // +optional Databases []DatabaseInfo `json:"databases,omitempty"` // ObservedGeneration represents the .metadata.generation that the status was set based upon. // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index 32f7bc429..b103d8495 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -184,6 +184,11 @@ func (in *BundlePushTracker) DeepCopy() *BundlePushTracker { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CNPGConfig) DeepCopyInto(out *CNPGConfig) { *out = *in + if in.PrimaryUpdateMethod != nil { + in, out := &in.PrimaryUpdateMethod, &out.PrimaryUpdateMethod + *out = new(string) + **out = **in + } if in.ConnectionPooler != nil { in, out := &in.ConnectionPooler, &out.ConnectionPooler *out = new(ConnectionPoolerConfig) @@ -453,13 +458,13 @@ func (in *DatabaseInfo) DeepCopyInto(out *DatabaseInfo) { } if in.AdminUserSecretRef != nil { in, out := &in.AdminUserSecretRef, &out.AdminUserSecretRef - *out = new(v1.LocalObjectReference) - **out = **in + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) } if in.RWUserSecretRef != nil { in, out := &in.RWUserSecretRef, &out.RWUserSecretRef - *out = new(v1.LocalObjectReference) - **out = **in + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) } if in.ConfigMapRef != nil { in, out := &in.ConfigMapRef, &out.ConfigMapRef @@ -775,8 +780,8 @@ func (in *ManagedRole) DeepCopyInto(out *ManagedRole) { *out = *in if in.PasswordSecretRef != nil { in, out := &in.PasswordSecretRef, &out.PasswordSecretRef - *out = new(v1.LocalObjectReference) - **out = **in + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) } } @@ -937,58 +942,6 @@ func (in *PhaseInfo) DeepCopy() *PhaseInfo { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PosgresClusterClassConfig) DeepCopyInto(out *PosgresClusterClassConfig) { - *out = *in - if in.Instances != nil { - in, out := &in.Instances, &out.Instances - *out = new(int32) - **out = **in - } - if in.Storage != nil { - in, out := &in.Storage, &out.Storage - x := (*in).DeepCopy() - *out = &x - } - if in.PostgresVersion != nil { - in, out := &in.PostgresVersion, &out.PostgresVersion - *out = new(string) - **out = **in - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(v1.ResourceRequirements) - (*in).DeepCopyInto(*out) - } - if in.PostgreSQLConfig != nil { - in, out := &in.PostgreSQLConfig, &out.PostgreSQLConfig - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.PgHBA != nil { - in, out := &in.PgHBA, &out.PgHBA - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ConnectionPoolerEnabled != nil { - in, out := &in.ConnectionPoolerEnabled, &out.ConnectionPoolerEnabled - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PosgresClusterClassConfig. -func (in *PosgresClusterClassConfig) DeepCopy() *PosgresClusterClassConfig { - if in == nil { - return nil - } - out := new(PosgresClusterClassConfig) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PostgresCluster) DeepCopyInto(out *PostgresCluster) { *out = *in @@ -1043,6 +996,58 @@ func (in *PostgresClusterClass) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresClusterClassConfig) DeepCopyInto(out *PostgresClusterClassConfig) { + *out = *in + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(int32) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + x := (*in).DeepCopy() + *out = &x + } + if in.PostgresVersion != nil { + in, out := &in.PostgresVersion, &out.PostgresVersion + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.PostgreSQLConfig != nil { + in, out := &in.PostgreSQLConfig, &out.PostgreSQLConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PgHBA != nil { + in, out := &in.PgHBA, &out.PgHBA + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ConnectionPoolerEnabled != nil { + in, out := &in.ConnectionPoolerEnabled, &out.ConnectionPoolerEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterClassConfig. +func (in *PostgresClusterClassConfig) DeepCopy() *PostgresClusterClassConfig { + if in == nil { + return nil + } + out := new(PostgresClusterClassConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PostgresClusterClassList) DeepCopyInto(out *PostgresClusterClassList) { *out = *in @@ -1078,7 +1083,11 @@ func (in *PostgresClusterClassList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PostgresClusterClassSpec) DeepCopyInto(out *PostgresClusterClassSpec) { *out = *in - in.Config.DeepCopyInto(&out.Config) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(PostgresClusterClassConfig) + (*in).DeepCopyInto(*out) + } if in.CNPG != nil { in, out := &in.CNPG, &out.CNPG *out = new(CNPGConfig) @@ -1106,6 +1115,11 @@ func (in *PostgresClusterClassStatus) DeepCopyInto(out *PostgresClusterClassStat (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Phase != nil { + in, out := &in.Phase, &out.Phase + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterClassStatus. @@ -1158,10 +1172,10 @@ func (in *PostgresClusterResources) DeepCopyInto(out *PostgresClusterResources) *out = new(v1.LocalObjectReference) **out = **in } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(v1.LocalObjectReference) - **out = **in + if in.SuperUserSecretRef != nil { + in, out := &in.SuperUserSecretRef, &out.SuperUserSecretRef + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) } } @@ -1227,6 +1241,11 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ClusterDeletionPolicy != nil { + in, out := &in.ClusterDeletionPolicy, &out.ClusterDeletionPolicy + *out = new(string) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterSpec. @@ -1242,6 +1261,11 @@ func (in *PostgresClusterSpec) DeepCopy() *PostgresClusterSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PostgresClusterStatus) DeepCopyInto(out *PostgresClusterStatus) { *out = *in + if in.Phase != nil { + in, out := &in.Phase, &out.Phase + *out = new(string) + **out = **in + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]metav1.Condition, len(*in)) @@ -1366,6 +1390,11 @@ func (in *PostgresDatabaseSpec) DeepCopy() *PostgresDatabaseSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PostgresDatabaseStatus) DeepCopyInto(out *PostgresDatabaseStatus) { *out = *in + if in.Phase != nil { + in, out := &in.Phase, &out.Phase + *out = new(string) + **out = **in + } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = make([]metav1.Condition, len(*in)) @@ -1380,6 +1409,11 @@ func (in *PostgresDatabaseStatus) DeepCopyInto(out *PostgresDatabaseStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresDatabaseStatus. 
diff --git a/bundle.Dockerfile b/bundle.Dockerfile index 396f16e00..7a08487c3 100644 --- a/bundle.Dockerfile +++ b/bundle.Dockerfile @@ -5,9 +5,8 @@ LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ LABEL operators.operatorframework.io.bundle.package.v1=splunk-operator -LABEL operators.operatorframework.io.bundle.channels.v1=stable -LABEL operators.operatorframework.io.bundle.channel.default.v1: stable -LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.39.0 +LABEL operators.operatorframework.io.bundle.channels.v1=alpha +LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.42.0 LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v4 diff --git a/bundle/manifests/enterprise.splunk.com_clustermanagers.yaml b/bundle/manifests/enterprise.splunk.com_clustermanagers.yaml index 343506c50..b80a1b1fb 100644 --- a/bundle/manifests/enterprise.splunk.com_clustermanagers.yaml +++ b/bundle/manifests/enterprise.splunk.com_clustermanagers.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 creationTimestamp: null labels: name: splunk-operator @@ -356,7 +356,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -371,7 +370,6 @@ spec: pod labels will be ignored. The default value is empty. 
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -537,7 +535,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -552,7 +549,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -645,8 +641,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -715,7 +711,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -730,7 +725,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -896,7 +890,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -911,7 +904,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1276,7 +1268,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1334,6 +1328,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. 
+ During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1626,7 +1657,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2028,13 +2059,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. 
+ TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -2162,6 +2192,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2502,7 +2534,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2513,7 +2544,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2583,6 +2613,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2614,8 +2646,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. 
+ description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2653,8 +2687,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. properties: readOnly: description: |- @@ -2673,8 +2709,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2726,6 +2763,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2835,8 +2874,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3238,15 +3276,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. 
If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3302,6 +3338,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3347,9 +3384,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. 
+ Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -3365,6 +3402,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3400,7 +3439,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3424,12 +3463,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3483,7 +3521,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. 
The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3508,7 +3546,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3633,8 +3671,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3650,8 +3689,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. 
properties: fsType: description: |- @@ -3924,6 +3966,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues.
+ properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour).
The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4016,8 +4163,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -4056,7 +4204,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -4128,8 +4276,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -4261,8 +4410,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. 
+ Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -4307,8 +4457,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/bundle/manifests/enterprise.splunk.com_clustermasters.yaml b/bundle/manifests/enterprise.splunk.com_clustermasters.yaml index 1bca4aa49..74a84d3fe 100644 --- a/bundle/manifests/enterprise.splunk.com_clustermasters.yaml +++ b/bundle/manifests/enterprise.splunk.com_clustermasters.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 creationTimestamp: null labels: name: splunk-operator @@ -352,7 +352,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -367,7 +366,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -533,7 +531,6 @@ spec: pod labels will be ignored. The default value is empty. 
The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -548,7 +545,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -641,8 +637,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -711,7 +707,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -726,7 +721,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -892,7 +886,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -907,7 +900,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1272,7 +1264,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1330,6 +1324,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. 
+ + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1622,7 +1653,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2024,13 +2055,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. 
type: string type: description: |- @@ -2158,6 +2188,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2498,7 +2530,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2509,7 +2540,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2579,6 +2609,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2610,8 +2642,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. 
properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2649,8 +2683,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. properties: readOnly: description: |- @@ -2669,8 +2705,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2722,6 +2759,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2831,8 +2870,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3234,15 +3272,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3298,6 +3334,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3343,9 +3380,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. 
properties: datasetName: description: |- @@ -3361,6 +3398,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3396,7 +3435,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3420,12 +3459,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3479,7 +3517,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). 
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3504,7 +3542,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3629,8 +3667,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3646,8 +3685,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3920,6 +3962,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. 
+ + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation.
+ type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400 (24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours.
+ format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4012,8 +4159,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -4052,7 +4200,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -4124,8 +4272,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -4257,8 +4406,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. 
properties: fsType: description: |- @@ -4303,8 +4453,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/bundle/manifests/enterprise.splunk.com_databaseclasses.yaml b/bundle/manifests/enterprise.splunk.com_databaseclasses.yaml deleted file mode 100644 index ce8cd63c8..000000000 --- a/bundle/manifests/enterprise.splunk.com_databaseclasses.yaml +++ /dev/null @@ -1,62 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.1 - creationTimestamp: null - labels: - name: splunk-operator - name: databaseclasses.enterprise.splunk.com -spec: - group: enterprise.splunk.com - names: - kind: DatabaseClass - listKind: DatabaseClassList - plural: databaseclasses - singular: databaseclass - scope: Namespaced - versions: - - name: v4 - schema: - openAPIV3Schema: - description: DatabaseClass is the Schema for the databaseclasses API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. 
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: DatabaseClassSpec defines the desired state of DatabaseClass. - properties: - foo: - description: Foo is an example field of DatabaseClass. Edit databaseclass_types.go - to remove/update - type: string - type: object - status: - description: DatabaseClassStatus defines the observed state of DatabaseClass. - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: null - storedVersions: null diff --git a/bundle/manifests/enterprise.splunk.com_databases.yaml b/bundle/manifests/enterprise.splunk.com_databases.yaml deleted file mode 100644 index 203cf3cd8..000000000 --- a/bundle/manifests/enterprise.splunk.com_databases.yaml +++ /dev/null @@ -1,62 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: v0.16.1 - creationTimestamp: null - labels: - name: splunk-operator - name: databases.enterprise.splunk.com -spec: - group: enterprise.splunk.com - names: - kind: Database - listKind: DatabaseList - plural: databases - singular: database - scope: Namespaced - versions: - - name: v4 - schema: - openAPIV3Schema: - description: Database is the Schema for the databases API. - properties: - apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources - type: string - kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. 
- In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - metadata: - type: object - spec: - description: DatabaseSpec defines the desired state of Database. - properties: - foo: - description: Foo is an example field of Database. Edit database_types.go - to remove/update - type: string - type: object - status: - description: DatabaseStatus defines the observed state of Database. - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: null - storedVersions: null diff --git a/bundle/manifests/enterprise.splunk.com_indexerclusters.yaml b/bundle/manifests/enterprise.splunk.com_indexerclusters.yaml index 551ee9c96..cf404f5b3 100644 --- a/bundle/manifests/enterprise.splunk.com_indexerclusters.yaml +++ b/bundle/manifests/enterprise.splunk.com_indexerclusters.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 creationTimestamp: null labels: name: splunk-operator @@ -359,7 +359,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -374,7 +373,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -540,7 +538,6 @@ spec: pod labels will be ignored. 
The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -555,7 +552,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -648,8 +644,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -718,7 +714,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -733,7 +728,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -899,7 +893,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -914,7 +907,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1124,7 +1116,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1182,6 +1176,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. 
+ + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1479,7 +1510,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -1881,13 +1912,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. 
type: string type: description: |- @@ -2015,6 +2045,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2238,7 +2270,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2249,7 +2280,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2319,6 +2349,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2350,8 +2382,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. 
properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2389,8 +2423,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. properties: readOnly: description: |- @@ -2409,8 +2445,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2462,6 +2499,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2571,8 +2610,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -2974,15 +3012,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. 
An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3038,6 +3074,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3083,9 +3120,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. 
properties: datasetName: description: |- @@ -3101,6 +3138,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3136,7 +3175,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3160,12 +3199,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3219,7 +3257,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). 
- Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3244,7 +3282,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3369,8 +3407,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3386,8 +3425,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3660,6 +3702,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. 
+ + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. 
+ type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. 
+ format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3752,8 +3899,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -3792,7 +3940,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -3864,8 +4012,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -3997,8 +4146,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. 
properties: fsType: description: |- @@ -4043,8 +4193,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- @@ -4531,7 +4683,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4546,7 +4697,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4712,7 +4862,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4727,7 +4876,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -4820,8 +4968,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -4890,7 +5038,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4905,7 +5052,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5071,7 +5217,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5086,7 +5231,6 @@ spec: pod labels will be ignored. The default value is empty. 
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5296,7 +5440,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -5354,6 +5500,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -5651,7 +5834,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -6053,13 +6236,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -6187,6 +6369,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -6410,7 +6594,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -6421,7 +6604,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -6491,6 +6673,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -6522,8 +6706,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -6561,8 +6747,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -6581,8 +6769,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -6634,6 +6823,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -6743,8 +6934,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -7146,15 +7336,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -7210,6 +7398,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -7255,9 +7444,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -7273,6 +7462,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -7308,7 +7499,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -7332,12 +7523,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -7391,7 +7581,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -7416,7 +7606,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -7541,8 +7731,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -7558,8 +7749,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -7832,6 +8026,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -7924,8 +8223,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -7964,7 +8264,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -8036,8 +8336,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -8169,8 +8470,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -8215,8 +8517,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/bundle/manifests/enterprise.splunk.com_licensemanagers.yaml b/bundle/manifests/enterprise.splunk.com_licensemanagers.yaml index 82b3b6743..50e5cbf40 100644 --- a/bundle/manifests/enterprise.splunk.com_licensemanagers.yaml +++ b/bundle/manifests/enterprise.splunk.com_licensemanagers.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 creationTimestamp: null labels: name: splunk-operator @@ -346,7 +346,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -361,7 +360,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -527,7 +525,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -542,7 +539,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -635,8 +631,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -705,7 +701,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -720,7 +715,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -886,7 +880,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -901,7 +894,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1266,7 +1258,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1324,6 +1318,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1616,7 +1647,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2018,13 +2049,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -2152,6 +2182,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2375,7 +2407,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2386,7 +2417,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2456,6 +2486,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2487,8 +2519,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2526,8 +2560,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -2546,8 +2582,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2599,6 +2636,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2708,8 +2747,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3111,15 +3149,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3175,6 +3211,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3220,9 +3257,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -3238,6 +3275,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3273,7 +3312,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3297,12 +3336,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3356,7 +3394,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3381,7 +3419,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3506,8 +3544,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3523,8 +3562,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3797,6 +3839,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3889,8 +4036,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -3929,7 +4077,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -4001,8 +4149,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -4134,8 +4283,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -4180,8 +4330,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/bundle/manifests/enterprise.splunk.com_licensemasters.yaml b/bundle/manifests/enterprise.splunk.com_licensemasters.yaml index 9ef2b9e0b..4ed74e1bd 100644 --- a/bundle/manifests/enterprise.splunk.com_licensemasters.yaml +++ b/bundle/manifests/enterprise.splunk.com_licensemasters.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 creationTimestamp: null labels: name: splunk-operator @@ -341,7 +341,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -356,7 +355,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -522,7 +520,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -537,7 +534,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -630,8 +626,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -700,7 +696,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -715,7 +710,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -881,7 +875,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -896,7 +889,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1261,7 +1253,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1319,6 +1313,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1611,7 +1642,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2013,13 +2044,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -2147,6 +2177,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2370,7 +2402,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2381,7 +2412,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2451,6 +2481,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2482,8 +2514,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2521,8 +2555,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -2541,8 +2577,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2594,6 +2631,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2703,8 +2742,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3106,15 +3144,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3170,6 +3206,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3215,9 +3252,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -3233,6 +3270,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3268,7 +3307,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3292,12 +3331,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3351,7 +3389,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3376,7 +3414,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3501,8 +3539,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3518,8 +3557,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3792,6 +3834,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3884,8 +4031,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -3924,7 +4072,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -3996,8 +4144,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -4129,8 +4278,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -4175,8 +4325,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/bundle/manifests/enterprise.splunk.com_monitoringconsoles.yaml b/bundle/manifests/enterprise.splunk.com_monitoringconsoles.yaml index 7b36dc27f..1024fef36 100644 --- a/bundle/manifests/enterprise.splunk.com_monitoringconsoles.yaml +++ b/bundle/manifests/enterprise.splunk.com_monitoringconsoles.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 creationTimestamp: null labels: name: splunk-operator @@ -348,7 +348,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -363,7 +362,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -529,7 +527,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -544,7 +541,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -637,8 +633,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -707,7 +703,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -722,7 +717,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -888,7 +882,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -903,7 +896,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1268,7 +1260,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1326,6 +1320,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1618,7 +1649,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2020,13 +2051,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -2154,6 +2184,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2377,7 +2409,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2388,7 +2419,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2458,6 +2488,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2489,8 +2521,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2528,8 +2562,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -2548,8 +2584,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2601,6 +2638,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2710,8 +2749,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3113,15 +3151,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3177,6 +3213,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3222,9 +3259,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -3240,6 +3277,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3275,7 +3314,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3299,12 +3338,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3358,7 +3396,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3383,7 +3421,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3508,8 +3546,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3525,8 +3564,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3799,6 +3841,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3891,8 +4038,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -3931,7 +4079,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -4003,8 +4151,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -4136,8 +4285,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -4182,8 +4332,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- @@ -4865,7 +5017,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4880,7 +5031,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5046,7 +5196,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5061,7 +5210,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5154,8 +5302,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -5224,7 +5372,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5239,7 +5386,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5405,7 +5551,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5420,7 +5565,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5785,7 +5929,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -5843,6 +5989,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -6135,7 +6318,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -6537,13 +6720,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -6671,6 +6853,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -6894,7 +7078,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -6905,7 +7088,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -6975,6 +7157,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -7006,8 +7190,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -7045,8 +7231,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -7065,8 +7253,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -7118,6 +7307,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -7227,8 +7418,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -7630,15 +7820,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -7694,6 +7882,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -7739,9 +7928,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -7757,6 +7946,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -7792,7 +7983,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -7816,12 +8007,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -7875,7 +8065,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -7900,7 +8090,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -8025,8 +8215,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -8042,8 +8233,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -8316,6 +8510,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8408,8 +8707,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -8448,7 +8748,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -8520,8 +8820,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -8653,8 +8954,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -8699,8 +9001,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/bundle/manifests/enterprise.splunk.com_postgresclusterclasses.yaml b/bundle/manifests/enterprise.splunk.com_postgresclusterclasses.yaml new file mode 100644 index 000000000..b564ca757 --- /dev/null +++ b/bundle/manifests/enterprise.splunk.com_postgresclusterclasses.yaml @@ -0,0 +1,332 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + creationTimestamp: null + labels: + name: splunk-operator + name: postgresclusterclasses.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: PostgresClusterClass + listKind: PostgresClusterClassList + plural: postgresclusterclasses + singular: postgresclusterclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.provisioner + name: Provisioner + type: string + - jsonPath: .spec.postgresClusterConfig.instances + name: Instances + type: integer + - jsonPath: .spec.postgresClusterConfig.storage + name: Storage + type: string + - jsonPath: .spec.postgresClusterConfig.postgresVersion + name: Version + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v4 + schema: + openAPIV3Schema: + description: |- + PostgresClusterClass is the Schema for the postgresclusterclasses API. + PostgresClusterClass defines a reusable template and policy for postgres cluster provisioning. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + PostgresClusterClassSpec defines the desired state of PostgresClusterClass. + PostgresClusterClass is immutable after creation - it serves as a template for Cluster CRs. + properties: + cnpg: + description: |- + CNPG contains CloudNativePG-specific configuration and policies. + Only used when Provisioner is "postgresql.cnpg.io" + These settings CANNOT be overridden in PostgresCluster CR (platform policy). + properties: + connectionPooler: + description: |- + ConnectionPooler contains PgBouncer connection pooler configuration. + When enabled, creates RW and RO pooler deployments for clusters using this class. + properties: + config: + additionalProperties: + type: string + description: |- + Config contains PgBouncer configuration parameters. + Passed directly to CNPG Pooler spec.pgbouncer.parameters. + See: https://cloudnative-pg.io/docs/1.28/connection_pooling/#pgbouncer-configuration-options + type: object + instances: + default: 3 + description: |- + Instances is the number of PgBouncer pod replicas. + Higher values provide better availability and load distribution. + format: int32 + maximum: 10 + minimum: 1 + type: integer + mode: + default: transaction + description: Mode defines the connection pooling strategy. + enum: + - session + - transaction + - statement + type: string + type: object + primaryUpdateMethod: + default: switchover + description: |- + PrimaryUpdateMethod determines how the primary instance is updated. 
+ "restart" - tolerate brief downtime (suitable for development) + "switchover" - minimal downtime via automated failover (production-grade) + + NOTE: When using "switchover", ensure clusterConfig.instances > 1. + Switchover requires at least one replica to fail over to. + enum: + - restart + - switchover + type: string + type: object + config: + default: {} + description: |- + PostgresClusterConfig contains cluster-level configuration. + These settings apply to PostgresCluster infrastructure. + Can be overridden in PostgresCluster CR. + properties: + connectionPoolerEnabled: + default: false + description: |- + ConnectionPoolerEnabled controls whether PgBouncer connection pooling is deployed. + When true, creates RW and RO pooler deployments for clusters using this class. + Can be overridden in PostgresCluster CR. + type: boolean + instances: + default: 1 + description: |- + Instances is the number of database instances (1 primary + N replicas). + Single instance (1) is suitable for development. + High availability requires at least 3 instances (1 primary + 2 replicas). + format: int32 + maximum: 10 + minimum: 1 + type: integer + pgHBA: + description: |- + PgHBA contains pg_hba.conf host-based authentication rules. + Defines client authentication and connection security (cluster-wide). + Example: ["hostssl all all 0.0.0.0/0 scram-sha-256"] + items: + type: string + type: array + postgresVersion: + default: "18" + description: |- + PostgresVersion is the PostgreSQL version (major or major.minor). + Examples: "18" (latest 18.x), "18.1" (specific minor), "17", "16" + pattern: ^[0-9]+(\.[0-9]+)?$ + type: string + postgresqlConfig: + additionalProperties: + type: string + description: |- + PostgreSQLConfig contains PostgreSQL engine configuration parameters. + Maps to postgresql.conf settings (cluster-wide). 
+ Example: {"max_connections": "200", "shared_buffers": "2GB"} + type: object + resources: + description: |- + Resources defines CPU and memory requests/limits per instance. + All instances in the cluster have the same resources. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + storage: + anyOf: + - type: integer + - type: string + default: 50Gi + description: |- + Storage is the size of persistent volume for each instance. + Cannot be decreased after cluster creation (PostgreSQL limitation). + Recommended minimum: 10Gi for production viability. + Example: "50Gi", "100Gi", "1Ti" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + provisioner: + description: |- + Provisioner identifies which database provisioner to use. + Currently supported: "postgresql.cnpg.io" (CloudNativePG) + type: string + required: + - provisioner + type: object + x-kubernetes-validations: + - message: cnpg config can only be set when provisioner is postgresql.cnpg.io + rule: '!has(self.cnpg) || self.provisioner == ''postgresql.cnpg.io''' + - message: cnpg.connectionPooler must be set when config.connectionPoolerEnabled + is true + rule: '!has(self.config) || !has(self.config.connectionPoolerEnabled) + || !self.config.connectionPoolerEnabled || (has(self.cnpg) && has(self.cnpg.connectionPooler))' + status: + description: PostgresClusterClassStatus defines the observed state of + PostgresClusterClass. + properties: + conditions: + description: Conditions represent the latest available observations + of the PostgresClusterClass state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + phase: + description: |- + Phase represents the current phase of the PostgresClusterClass. 
+ Valid phases: "Ready", "Invalid" + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/enterprise.splunk.com_postgresclusters.yaml b/bundle/manifests/enterprise.splunk.com_postgresclusters.yaml new file mode 100644 index 000000000..abc6ddfd0 --- /dev/null +++ b/bundle/manifests/enterprise.splunk.com_postgresclusters.yaml @@ -0,0 +1,477 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + creationTimestamp: null + labels: + name: splunk-operator + name: postgresclusters.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: PostgresCluster + listKind: PostgresClusterList + plural: postgresclusters + singular: postgrescluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.class + name: Class + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v4 + schema: + openAPIV3Schema: + description: PostgresCluster is the Schema for the postgresclusters API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + PostgresClusterSpec defines the desired state of PostgresCluster. + Validation rules ensure immutability of Class, and that Storage and PostgresVersion can only be set once and cannot be removed or downgraded. + properties: + class: + description: This field is IMMUTABLE after creation. + minLength: 1 + type: string + x-kubernetes-validations: + - message: class is immutable + rule: self == oldSelf + clusterDeletionPolicy: + default: Retain + description: ClusterDeletionPolicy controls the deletion behavior + of the underlying CNPG Cluster when the PostgresCluster is deleted. + enum: + - Delete + - Retain + type: string + connectionPoolerConfig: + description: Only takes effect when connection pooling is enabled. + properties: + config: + additionalProperties: + type: string + description: |- + Config contains PgBouncer configuration parameters. + Passed directly to CNPG Pooler spec.pgbouncer.parameters. + See: https://cloudnative-pg.io/docs/1.28/connection_pooling/#pgbouncer-configuration-options + type: object + instances: + default: 3 + description: |- + Instances is the number of PgBouncer pod replicas. + Higher values provide better availability and load distribution. + format: int32 + maximum: 10 + minimum: 1 + type: integer + mode: + default: transaction + description: Mode defines the connection pooling strategy. + enum: + - session + - transaction + - statement + type: string + type: object + connectionPoolerEnabled: + default: false + description: |- + ConnectionPoolerEnabled controls whether PgBouncer connection pooling is deployed for this cluster. + When set, takes precedence over the class-level connectionPoolerEnabled value. + type: boolean + instances: + description: Instances overrides the number of PostgreSQL instances + from ClusterClass. 
+ format: int32 + maximum: 10 + minimum: 1 + type: integer + managedRoles: + description: |- + ManagedRoles contains PostgreSQL roles that should be created in the cluster. + This field supports Server-Side Apply with per-role granularity, allowing + multiple PostgresDatabase controllers to manage different roles independently. + items: + description: ManagedRole represents a PostgreSQL role to be created + and managed in the cluster. + properties: + exists: + default: true + description: Exists controls whether the role should be present + (true) or absent (false) in PostgreSQL. + type: boolean + name: + description: Name of the role/user to create. + maxLength: 63 + minLength: 1 + type: string + passwordSecretRef: + description: PasswordSecretRef references a Secret and the key + within it containing the password for this role. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + pgHBA: + default: [] + description: |- + PgHBA contains pg_hba.conf host-based authentication rules. + Defines client authentication and connection security (cluster-wide). + Maps to pg_hba.conf settings. + Default empty array prevents panic. 
+ Example: ["hostssl all all 0.0.0.0/0 scram-sha-256"] + items: + type: string + type: array + postgresVersion: + description: |- + PostgresVersion is the PostgreSQL version (major or major.minor). + Examples: "18" (latest 18.x), "18.1" (specific minor), "17", "16" + pattern: ^[0-9]+(\.[0-9]+)?$ + type: string + postgresqlConfig: + additionalProperties: + type: string + default: {} + description: |- + PostgreSQL overrides PostgreSQL engine parameters from ClusterClass. + Maps to postgresql.conf settings. + Default empty map prevents panic. + Example: {"shared_buffers": "128MB", "log_min_duration_statement": "500ms"} + type: object + resources: + description: Resources overrides CPU/memory resources from ClusterClass. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + storage: + anyOf: + - type: integer + - type: string + description: |- + Storage overrides the storage size from ClusterClass. + Example: "5Gi" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - class + type: object + x-kubernetes-validations: + - messageExpression: '!has(self.postgresVersion) ? ''postgresVersion cannot + be removed once set (was: '' + oldSelf.postgresVersion + '')'' : ''postgresVersion + major version cannot be downgraded (from: '' + oldSelf.postgresVersion + + '', to: '' + self.postgresVersion + '')''' + rule: '!has(oldSelf.postgresVersion) || (has(self.postgresVersion) && + int(self.postgresVersion.split(''.'')[0]) >= int(oldSelf.postgresVersion.split(''.'')[0]))' + - messageExpression: '!has(self.storage) ? 
''storage cannot be removed + once set (was: '' + string(oldSelf.storage) + '')'' : ''storage size + cannot be decreased (from: '' + string(oldSelf.storage) + '', to: + '' + string(self.storage) + '')''' + rule: '!has(oldSelf.storage) || (has(self.storage) && quantity(self.storage).compareTo(quantity(oldSelf.storage)) + >= 0)' + - message: connectionPoolerConfig cannot be overridden on PostgresCluster + rule: '!has(self.connectionPoolerConfig)' + status: + description: PostgresClusterStatus defines the observed state of PostgresCluster. + properties: + conditions: + description: Conditions represent the latest available observations + of the PostgresCluster's state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. 
+ This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + connectionPoolerStatus: + description: |- + ConnectionPoolerStatus contains the observed state of the connection pooler. + Only populated when connection pooler is enabled in the PostgresClusterClass. + properties: + enabled: + description: Enabled indicates whether pooler is active for this + cluster. + type: boolean + type: object + managedRolesStatus: + description: ManagedRolesStatus tracks the reconciliation status of + managed roles. + properties: + failed: + additionalProperties: + type: string + description: Failed contains roles that failed to reconcile with + error messages. + type: object + pending: + description: Pending contains roles that are being created but + not yet ready. + items: + type: string + type: array + reconciled: + description: Reconciled contains roles that have been successfully + created and are ready. + items: + type: string + type: array + type: object + phase: + description: |- + Phase represents the current phase of the PostgresCluster. + Values: "Pending", "Provisioning", "Failed", "Ready", "Deleting" + type: string + provisionerRef: + description: |- + ProvisionerRef contains reference to the provisioner resource managing this PostgresCluster. + Right now, only CNPG is supported. + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + resources: + description: Resources contains references to related Kubernetes resources + like ConfigMaps and Secrets. + properties: + configMapRef: + description: |- + ConfigMapRef references the ConfigMap with connection endpoints. + Contains: CLUSTER_ENDPOINTS, POOLER_ENDPOINTS (if connection pooler enabled) + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + secretRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/enterprise.splunk.com_postgresdatabases.yaml b/bundle/manifests/enterprise.splunk.com_postgresdatabases.yaml new file mode 100644 index 000000000..badbc70b8 --- /dev/null +++ b/bundle/manifests/enterprise.splunk.com_postgresdatabases.yaml @@ -0,0 +1,267 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + creationTimestamp: null + labels: + name: splunk-operator + name: postgresdatabases.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: PostgresDatabase + listKind: PostgresDatabaseList + plural: postgresdatabases + singular: postgresdatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + 
- jsonPath: .spec.clusterRef.name + name: Cluster + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v4 + schema: + openAPIV3Schema: + description: PostgresDatabase is the Schema for the postgresdatabases API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PostgresDatabaseSpec defines the desired state of PostgresDatabase. + properties: + clusterRef: + description: Reference to Postgres Cluster managed by postgresCluster + controller + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + databases: + items: + properties: + deletionPolicy: + default: Delete + enum: + - Delete + - Retain + type: string + extensions: + items: + type: string + type: array + name: + maxLength: 30 + type: string + required: + - name + type: object + maxItems: 10 + minItems: 1 + type: array + x-kubernetes-validations: + - message: database names must be unique + rule: self.all(x, self.filter(y, y.name == x.name).size() == 1) + required: + - clusterRef + - databases + type: object + x-kubernetes-validations: + - message: clusterRef is immutable + rule: self.clusterRef == oldSelf.clusterRef + status: + description: PostgresDatabaseStatus defines the observed state of PostgresDatabase. + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + databases: + items: + properties: + adminUserSecretRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + configMap: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + databaseRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + name: + type: string + ready: + type: boolean + rwUserSecretRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: array + observedGeneration: + description: ObservedGeneration represents the .metadata.generation + that the status was set based upon. 
+ format: int64 + type: integer + phase: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/enterprise.splunk.com_searchheadclusters.yaml b/bundle/manifests/enterprise.splunk.com_searchheadclusters.yaml index f280f9e94..b232ffba4 100644 --- a/bundle/manifests/enterprise.splunk.com_searchheadclusters.yaml +++ b/bundle/manifests/enterprise.splunk.com_searchheadclusters.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 creationTimestamp: null labels: name: splunk-operator @@ -354,7 +354,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -369,7 +368,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -535,7 +533,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -550,7 +547,6 @@ spec: pod labels will be ignored. The default value is empty. 
The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -643,8 +639,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -713,7 +709,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -728,7 +723,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -894,7 +888,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -909,7 +902,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1274,7 +1266,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1332,6 +1326,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. 
+ type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1629,7 +1660,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2031,13 +2062,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -2165,6 +2195,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2388,7 +2420,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. 
All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2399,7 +2430,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2469,6 +2499,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2500,8 +2532,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2539,8 +2573,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -2559,8 +2595,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2612,6 +2649,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2721,8 +2760,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3124,15 +3162,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3188,6 +3224,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3233,9 +3270,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -3251,6 +3288,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3286,7 +3325,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3310,12 +3349,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3369,7 +3407,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3394,7 +3432,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3519,8 +3557,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3536,8 +3575,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3810,6 +3852,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3902,8 +4049,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -3942,7 +4090,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -4014,8 +4162,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -4147,8 +4296,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -4193,8 +4343,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- @@ -4958,7 +5110,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -4973,7 +5124,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5139,7 +5289,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5154,7 +5303,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5247,8 +5395,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -5317,7 +5465,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5332,7 +5479,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5498,7 +5644,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5513,7 +5658,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -6058,7 +6202,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -6135,7 +6279,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -6193,6 +6339,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -6490,7 +6673,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -6892,13 +7075,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -7026,6 +7208,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -7249,7 +7433,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -7260,7 +7443,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -7330,6 +7512,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -7361,8 +7545,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -7400,8 +7586,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -7420,8 +7608,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -7473,6 +7662,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -7582,8 +7773,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -7985,15 +8175,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -8049,6 +8237,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -8094,9 +8283,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -8112,6 +8301,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -8147,7 +8338,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -8171,12 +8362,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -8230,7 +8420,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -8255,7 +8445,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -8380,8 +8570,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -8397,8 +8588,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -8671,6 +8865,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8763,8 +9062,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -8803,7 +9103,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -8875,8 +9175,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -9008,8 +9309,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -9054,8 +9356,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/bundle/manifests/enterprise.splunk.com_standalones.yaml b/bundle/manifests/enterprise.splunk.com_standalones.yaml index ff14c2184..decd94881 100644 --- a/bundle/manifests/enterprise.splunk.com_standalones.yaml +++ b/bundle/manifests/enterprise.splunk.com_standalones.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.1 + controller-gen.kubebuilder.io/version: v0.18.0 creationTimestamp: null labels: name: splunk-operator @@ -349,7 +349,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -364,7 +363,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -530,7 +528,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -545,7 +542,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -638,8 +634,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -708,7 +704,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -723,7 +718,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -889,7 +883,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
items: type: string type: array @@ -904,7 +897,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -1269,7 +1261,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1327,6 +1321,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1623,7 +1654,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2025,13 +2056,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -2159,6 +2189,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -2499,7 +2531,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -2510,7 +2541,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -2580,6 +2610,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -2611,8 +2643,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -2650,8 +2684,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -2670,8 +2706,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -2723,6 +2760,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -2832,8 +2871,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -3235,15 +3273,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3299,6 +3335,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -3344,9 +3381,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -3362,6 +3399,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -3397,7 +3436,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -3421,12 +3460,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3480,7 +3518,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -3505,7 +3543,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3630,8 +3668,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -3647,8 +3686,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -3921,6 +3963,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4013,8 +4160,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -4053,7 +4201,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -4125,8 +4273,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -4258,8 +4407,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -4304,8 +4454,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- @@ -5110,7 +5262,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5125,7 +5276,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5291,7 +5441,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5306,7 +5455,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5399,8 +5547,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -5469,7 +5617,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5484,7 +5631,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5650,7 +5796,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both matchLabelKeys and labelSelector. Also, matchLabelKeys cannot be set when labelSelector isn't set. - This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -5665,7 +5810,6 @@ spec: pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. Also, mismatchLabelKeys cannot be set when labelSelector isn't set. 
- This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). items: type: string type: array @@ -6030,7 +6174,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -6088,6 +6234,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -6384,7 +6567,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -6786,13 +6969,12 @@ spec: type: object trafficDistribution: description: |- - TrafficDistribution offers a way to express preferences for how traffic is - distributed to Service endpoints. Implementations can use this field as a - hint, but are not required to guarantee strict adherence. If the field is - not set, the implementation will apply its default routing strategy. If set - to "PreferClose", implementations should prioritize endpoints that are - topologically close (e.g., same zone). - This is an alpha field and requires enabling ServiceTrafficDistribution feature. + TrafficDistribution offers a way to express preferences for how traffic + is distributed to Service endpoints. Implementations can use this field + as a hint, but are not required to guarantee strict adherence. If the + field is not set, the implementation will apply its default routing + strategy. If set to "PreferClose", implementations should prioritize + endpoints that are in the same zone. type: string type: description: |- @@ -6920,6 +7102,8 @@ spec: Ports is a list of records of service ports If used, every port defined in the service should have an entry in it items: + description: PortStatus represents the error condition + of a service port properties: error: description: |- @@ -7260,7 +7444,6 @@ spec: - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. If this value is nil, the behavior is equivalent to the Honor policy. 
- This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string nodeTaintsPolicy: description: |- @@ -7271,7 +7454,6 @@ spec: - Ignore: node taints are ignored. All nodes are included. If this value is nil, the behavior is equivalent to the Ignore policy. - This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string topologyKey: description: |- @@ -7341,6 +7523,8 @@ spec: description: |- awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: AWSElasticBlockStore is deprecated. All operations for the in-tree + awsElasticBlockStore type are redirected to the ebs.csi.aws.com CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: @@ -7372,8 +7556,10 @@ spec: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on - the host and bind mount to the pod. + description: |- + azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + Deprecated: AzureDisk is deprecated. All operations for the in-tree azureDisk type + are redirected to the disk.csi.azure.com CSI driver. properties: cachingMode: description: 'cachingMode is the Host Caching mode: None, @@ -7411,8 +7597,10 @@ spec: - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount - on the host and bind mount to the pod. + description: |- + azureFile represents an Azure File Service mount on the host and bind mount to the pod. + Deprecated: AzureFile is deprecated. All operations for the in-tree azureFile type + are redirected to the file.csi.azure.com CSI driver. 
properties: readOnly: description: |- @@ -7431,8 +7619,9 @@ spec: - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that - shares a pod's lifetime + description: |- + cephFS represents a Ceph FS mount on the host that shares a pod's lifetime. + Deprecated: CephFS is deprecated and the in-tree cephfs type is no longer supported. properties: monitors: description: |- @@ -7484,6 +7673,8 @@ spec: cinder: description: |- cinder represents a cinder volume attached and mounted on kubelets host machine. + Deprecated: Cinder is deprecated. All operations for the in-tree cinder type + are redirected to the cinder.csi.openstack.org CSI driver. More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: @@ -7593,8 +7784,7 @@ spec: x-kubernetes-map-type: atomic csi: description: csi (Container Storage Interface) represents ephemeral - storage that is handled by certain external CSI drivers (Beta - feature). + storage that is handled by certain external CSI drivers. properties: driver: description: |- @@ -7996,15 +8186,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -8060,6 +8248,7 @@ spec: description: |- flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + Deprecated: FlexVolume is deprecated. Consider using a CSIDriver instead. properties: driver: description: driver is the name of the driver to use for @@ -8105,9 +8294,9 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to - a kubelet's host machine. This depends on the Flocker control - service being running + description: |- + flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running. + Deprecated: Flocker is deprecated and the in-tree flocker type is no longer supported. properties: datasetName: description: |- @@ -8123,6 +8312,8 @@ spec: description: |- gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. + Deprecated: GCEPersistentDisk is deprecated. All operations for the in-tree + gcePersistentDisk type are redirected to the pd.csi.storage.gke.io CSI driver. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: @@ -8158,7 +8349,7 @@ spec: gitRepo: description: |- gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. 
To provision a container with a git repo, mount an + Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container. properties: @@ -8182,12 +8373,11 @@ spec: glusterfs: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md + Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -8241,7 +8431,7 @@ spec: The types of objects that may be mounted by this volume are defined by the container runtime implementation on a host machine and at minimum must include all valid types supported by the container image field. The OCI object gets mounted in a single directory (spec.containers[*].volumeMounts.mountPath) by merging the manifest layers in the same way as for container images. The volume will be mounted read-only (ro) and non-executable files (noexec). - Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath). + Sub path mounts for containers are not supported (spec.containers[*].volumeMounts.subpath) before 1.33. The field spec.securityContext.fsGroupChangePolicy has no effect on this volume type. properties: pullPolicy: @@ -8266,7 +8456,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -8391,8 +8581,9 @@ spec: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController - persistent disk attached and mounted on kubelets host machine + description: |- + photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine. + Deprecated: PhotonPersistentDisk is deprecated and the in-tree photonPersistentDisk type is no longer supported. properties: fsType: description: |- @@ -8408,8 +8599,11 @@ spec: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached - and mounted on kubelets host machine + description: |- + portworxVolume represents a portworx volume attached and mounted on kubelets host machine. + Deprecated: PortworxVolume is deprecated. All operations for the in-tree portworxVolume type + are redirected to the pxd.portworx.com CSI driver when the CSIMigrationPortworx feature-gate + is on. properties: fsType: description: |- @@ -8682,6 +8876,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. 
+ + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. 
+ + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8774,8 +9073,9 @@ spec: x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host - that shares a pod's lifetime + description: |- + quobyte represents a Quobyte mount on the host that shares a pod's lifetime. + Deprecated: Quobyte is deprecated and the in-tree quobyte type is no longer supported. properties: group: description: |- @@ -8814,7 +9114,7 @@ spec: rbd: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/rbd/README.md + Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. properties: fsType: description: |- @@ -8886,8 +9186,9 @@ spec: - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume - attached and mounted on Kubernetes nodes. + description: |- + scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + Deprecated: ScaleIO is deprecated and the in-tree scaleIO type is no longer supported. properties: fsType: default: xfs @@ -9019,8 +9320,9 @@ spec: type: string type: object storageos: - description: storageOS represents a StorageOS volume attached - and mounted on Kubernetes nodes. + description: |- + storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + Deprecated: StorageOS is deprecated and the in-tree storageos type is no longer supported. properties: fsType: description: |- @@ -9065,8 +9367,10 @@ spec: type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached - and mounted on kubelets host machine + description: |- + vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine. + Deprecated: VsphereVolume is deprecated. 
All operations for the in-tree vsphereVolume type + are redirected to the csi.vsphere.vmware.com CSI driver. properties: fsType: description: |- diff --git a/bundle/manifests/splunk-operator.clusterserviceversion.yaml b/bundle/manifests/splunk-operator.clusterserviceversion.yaml index 431179e7e..93734eda3 100644 --- a/bundle/manifests/splunk-operator.clusterserviceversion.yaml +++ b/bundle/manifests/splunk-operator.clusterserviceversion.yaml @@ -63,71 +63,169 @@ metadata: "apiVersion": "enterprise.splunk.com/v4", "kind": "ClusterManager", "metadata": { - "name": "clustermanager-sample" + "finalizers": [ + "enterprise.splunk.com/delete-pvc" + ], + "name": "clustermanager-sample", + "namespace": "splunk-operator" }, "spec": null }, { "apiVersion": "enterprise.splunk.com/v4", - "kind": "Database", + "kind": "IndexerCluster", "metadata": { - "labels": { - "app.kubernetes.io/managed-by": "kustomize", - "app.kubernetes.io/name": "splunk-operator" - }, - "name": "database-sample" + "finalizers": [ + "enterprise.splunk.com/delete-pvc" + ], + "name": "indexercluster-sample", + "namespace": "splunk-operator" }, - "spec": null + "spec": { + "clusterManagerRef": { + "name": "clustermanager-sample" + } + } }, { "apiVersion": "enterprise.splunk.com/v4", - "kind": "DatabaseClass", + "kind": "LicenseManager", "metadata": { - "labels": { - "app.kubernetes.io/managed-by": "kustomize", - "app.kubernetes.io/name": "splunk-operator" - }, - "name": "databaseclass-sample" + "finalizers": [ + "enterprise.splunk.com/delete-pvc" + ], + "name": "licensemanager-sample", + "namespace": "splunk-operator" }, - "spec": null + "spec": { + "licenseUrl": "/mnt/licenses/enterprise.lic", + "volumes": [ + { + "configMap": { + "name": "splunk-licenses" + }, + "name": "licenses" + } + ] + } }, { "apiVersion": "enterprise.splunk.com/v4", - "kind": "IndexerCluster", + "kind": "MonitoringConsole", "metadata": { - "name": "indexercluster-sample" + "finalizers": [ + "enterprise.splunk.com/delete-pvc" + ], + 
"name": "monitoringconsole-sample", + "namespace": "splunk-operator" }, "spec": null }, { "apiVersion": "enterprise.splunk.com/v4", - "kind": "LicenseManager", + "kind": "PostgresCluster", + "metadata": { + "labels": { + "app.kubernetes.io/managed-by": "kustomize", + "app.kubernetes.io/name": "splunk-operator" + }, + "name": "postgresql-cluster-dev" + }, + "spec": { + "class": "postgresql-dev" + } + }, + { + "apiVersion": "enterprise.splunk.com/v4", + "kind": "PostgresClusterClass", "metadata": { - "name": "licensemanager-sample" + "name": "postgresql-dev" }, - "spec": null + "spec": { + "cnpg": { + "connectionPooler": { + "config": { + "max_client_conn": "100" + }, + "instances": 2, + "mode": "transaction" + }, + "primaryUpdateMethod": "restart" + }, + "config": { + "connectionPoolerEnabled": true, + "instances": 1, + "postgresVersion": "18", + "resources": { + "limits": { + "cpu": "1", + "memory": "2Gi" + }, + "requests": { + "cpu": "500m", + "memory": "1Gi" + } + }, + "storage": "10Gi" + }, + "provisioner": "postgresql.cnpg.io" + } }, { "apiVersion": "enterprise.splunk.com/v4", - "kind": "MonitoringConsole", + "kind": "PostgresDatabase", "metadata": { - "name": "monitoringconsole-sample" + "name": "splunk-databases", + "namespace": "default" }, - "spec": null + "spec": { + "clusterRef": { + "name": "postgresql-cluster-dev" + }, + "databases": [ + { + "deletionPolicy": "Retain", + "extensions": [ + "pg_stat_statements", + "pgcrypto" + ], + "name": "kvstore" + }, + { + "deletionPolicy": "Delete", + "extensions": [ + "pg_trgm" + ], + "name": "analytics" + } + ] + } }, { "apiVersion": "enterprise.splunk.com/v4", "kind": "SearchHeadCluster", "metadata": { - "name": "searchheadcluster-sample" + "finalizers": [ + "enterprise.splunk.com/delete-pvc" + ], + "name": "searchheadcluster-sample", + "namespace": "splunk-operator" }, - "spec": null + "spec": { + "clusterManagerRef": { + "name": "clustermanager-sample" + } + } }, { "apiVersion": "enterprise.splunk.com/v4", 
"kind": "Standalone", "metadata": { - "name": "standalone-sample" + "finalizers": [ + "enterprise.splunk.com/delete-pvc" + ], + "name": "standalone-sample", + "namespace": "splunk-operator" }, "spec": null } @@ -135,7 +233,7 @@ metadata: capabilities: Seamless Upgrades categories: Big Data, Logging & Tracing, Monitoring, Security, AI/Machine Learning containerImage: splunk/splunk-operator@sha256:c4e0d314622699496f675760aad314520d050a66627fdf33e1e21fa28ca85d50 - createdAt: "2026-01-05T14:32:06Z" + createdAt: "2026-03-23T13:00:36Z" description: The Splunk Operator for Kubernetes enables you to quickly and easily deploy Splunk Enterprise on your choice of private or public cloud provider. The Operator simplifies scaling and management of Splunk Enterprise by automating @@ -166,16 +264,6 @@ spec: - kind: ClusterMaster name: clustermasters.enterprise.splunk.com version: v2 - - description: DatabaseClass is the Schema for the databaseclasses API. - displayName: Database Class - kind: DatabaseClass - name: databaseclasses.enterprise.splunk.com - version: v4 - - description: Database is the Schema for the databases API. - displayName: Database - kind: Database - name: databases.enterprise.splunk.com - version: v4 - description: IndexerCluster is the Schema for a Splunk Enterprise indexer cluster displayName: Indexer Cluster kind: IndexerCluster @@ -218,6 +306,23 @@ spec: kind: MonitoringConsole name: monitoringconsoles.enterprise.splunk.com version: v4 + - description: |- + PostgresClusterClass is the Schema for the postgresclusterclasses API. + PostgresClusterClass defines a reusable template and policy for postgres cluster provisioning. + displayName: Postgres Cluster Class + kind: PostgresClusterClass + name: postgresclusterclasses.enterprise.splunk.com + version: v4 + - description: PostgresCluster is the Schema for the postgresclusters API. 
+ displayName: Postgres Cluster + kind: PostgresCluster + name: postgresclusters.enterprise.splunk.com + version: v4 + - description: PostgresDatabase is the Schema for the postgresdatabases API. + displayName: Postgres Database + kind: PostgresDatabase + name: postgresdatabases.enterprise.splunk.com + version: v4 - description: SearchHeadCluster is the Schema for a Splunk Enterprise search head cluster displayName: Search Head Cluster @@ -264,25 +369,6 @@ spec: spec: clusterPermissions: - rules: - - apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - verbs: - - get - - list - - apiGroups: - - apps - resources: - - statefulsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - "" resources: @@ -304,17 +390,36 @@ spec: - patch - update - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - apiGroups: + - apps + resources: + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - enterprise.splunk.com resources: - clustermanagers - clustermasters - - databaseclasses - - databases - indexerclusters - licensemanagers - licensemasters - monitoringconsoles + - postgresclusters + - postgresdatabases - searchheadclusters - standalones verbs: @@ -330,12 +435,12 @@ spec: resources: - clustermanagers/finalizers - clustermasters/finalizers - - databaseclasses/finalizers - - databases/finalizers - indexerclusters/finalizers - licensemanagers/finalizers - licensemasters/finalizers - monitoringconsoles/finalizers + - postgresclusters/finalizers + - postgresdatabases/finalizers - searchheadclusters/finalizers - standalones/finalizers verbs: @@ -345,18 +450,47 @@ spec: resources: - clustermanagers/status - clustermasters/status - - databaseclasses/status - - databases/status - indexerclusters/status - licensemanagers/status - licensemasters/status - monitoringconsoles/status + - 
postgresclusters/status + - postgresdatabases/status - searchheadclusters/status - standalones/status verbs: - get - patch - update + - apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses + verbs: + - get + - list + - watch + - apiGroups: + - postgresql.cnpg.io + resources: + - clusters + - databases + - poolers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get - apiGroups: - authentication.k8s.io resources: @@ -415,7 +549,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name - image: localhost:5001/sok-operator-mploski:v1.0.0 + image: controller:latest imagePullPolicy: Always livenessProbe: httpGet: diff --git a/cmd/main.go b/cmd/main.go index e696d9d27..eacb7b9ee 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -48,7 +48,6 @@ import ( enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" enterpriseApi "github.com/splunk/splunk-operator/api/v4" - enterprisev4 "github.com/splunk/splunk-operator/api/v4" "github.com/splunk/splunk-operator/internal/controller" cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -65,7 +64,6 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(enterpriseApi.AddToScheme(scheme)) utilruntime.Must(enterpriseApiV3.AddToScheme(scheme)) - utilruntime.Must(enterprisev4.AddToScheme(scheme)) utilruntime.Must(cnpgv1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme //utilruntime.Must(extapi.AddToScheme(scheme)) diff --git a/config/crd/bases/enterprise.splunk.com_postgresclusterclasses.yaml b/config/crd/bases/enterprise.splunk.com_postgresclusterclasses.yaml index b6c333bd0..70ef3536b 100644 --- a/config/crd/bases/enterprise.splunk.com_postgresclusterclasses.yaml +++ b/config/crd/bases/enterprise.splunk.com_postgresclusterclasses.yaml @@ -245,6 +245,8 @@ spec: - provisioner type: object x-kubernetes-validations: + - message: cnpg config can 
only be set when provisioner is postgresql.cnpg.io + rule: '!has(self.cnpg) || self.provisioner == ''postgresql.cnpg.io''' - message: cnpg.connectionPooler must be set when config.connectionPoolerEnabled is true rule: '!has(self.config) || !has(self.config.connectionPoolerEnabled) diff --git a/config/crd/bases/enterprise.splunk.com_postgresclusters.yaml b/config/crd/bases/enterprise.splunk.com_postgresclusters.yaml index ae0b2aa2c..14ba142d6 100644 --- a/config/crd/bases/enterprise.splunk.com_postgresclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_postgresclusters.yaml @@ -117,24 +117,24 @@ spec: description: ManagedRole represents a PostgreSQL role to be created and managed in the cluster. properties: - ensure: - default: present - description: Ensure controls whether the role should exist (present) - or not (absent). - enum: - - present - - absent - type: string + exists: + default: true + description: Exists controls whether the role should be present + (true) or absent (false) in PostgreSQL. + type: boolean name: description: Name of the role/user to create. maxLength: 63 minLength: 1 type: string passwordSecretRef: - description: |- - PasswordSecretRef references a Secret containing the password for this role. - The Secret should have a key "password" with the password value. + description: PasswordSecretRef references a Secret and the key + within it containing the password for this role. properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string name: default: "" description: |- @@ -144,6 +144,12 @@ spec: almost certainly wrong. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key type: object x-kubernetes-map-type: atomic required: @@ -431,10 +437,12 @@ spec: type: object x-kubernetes-map-type: atomic secretRef: - description: |- - SecretRef references the Secret with superuser credentials. - Contains: passwords for superuser + description: SecretKeySelector selects a key of a Secret. properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string name: default: "" description: |- @@ -444,6 +452,12 @@ spec: almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key type: object x-kubernetes-map-type: atomic type: object diff --git a/config/crd/bases/enterprise.splunk.com_postgresdatabases.yaml b/config/crd/bases/enterprise.splunk.com_postgresdatabases.yaml index 8de8462c3..d8df534d3 100644 --- a/config/crd/bases/enterprise.splunk.com_postgresdatabases.yaml +++ b/config/crd/bases/enterprise.splunk.com_postgresdatabases.yaml @@ -50,9 +50,8 @@ spec: description: PostgresDatabaseSpec defines the desired state of PostgresDatabase. properties: clusterRef: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. + description: Reference to Postgres Cluster managed by postgresCluster + controller properties: name: default: "" @@ -160,10 +159,12 @@ spec: items: properties: adminUserSecretRef: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. + description: SecretKeySelector selects a key of a Secret. 
properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string name: default: "" description: |- @@ -173,6 +174,12 @@ spec: almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key type: object x-kubernetes-map-type: atomic configMap: @@ -212,10 +219,12 @@ spec: ready: type: boolean rwUserSecretRef: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. + description: SecretKeySelector selects a key of a Secret. properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string name: default: "" description: |- @@ -225,6 +234,12 @@ spec: almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key type: object x-kubernetes-map-type: atomic type: object diff --git a/config/manifests/bases/splunk-operator.clusterserviceversion.yaml b/config/manifests/bases/splunk-operator.clusterserviceversion.yaml index ad90c9cdb..7810c92dd 100644 --- a/config/manifests/bases/splunk-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/splunk-operator.clusterserviceversion.yaml @@ -28,16 +28,6 @@ spec: kind: ClusterMaster name: clustermasters.enterprise.splunk.com version: v3 - - description: DatabaseClass is the Schema for the databaseclasses API. - displayName: Database Class - kind: DatabaseClass - name: databaseclasses.enterprise.splunk.com - version: v4 - - description: Database is the Schema for the databases API. 
- displayName: Database - kind: Database - name: databases.enterprise.splunk.com - version: v4 - description: IndexerCluster is the Schema for a Splunk Enterprise indexer cluster displayName: Indexer Cluster kind: IndexerCluster @@ -68,6 +58,23 @@ spec: kind: MonitoringConsole name: monitoringconsoles.enterprise.splunk.com version: v3 + - description: |- + PostgresClusterClass is the Schema for the postgresclusterclasses API. + PostgresClusterClass defines a reusable template and policy for postgres cluster provisioning. + displayName: Postgres Cluster Class + kind: PostgresClusterClass + name: postgresclusterclasses.enterprise.splunk.com + version: v4 + - description: PostgresCluster is the Schema for the postgresclusters API. + displayName: Postgres Cluster + kind: PostgresCluster + name: postgresclusters.enterprise.splunk.com + version: v4 + - description: PostgresDatabase is the Schema for the postgresdatabases API. + displayName: Postgres Database + kind: PostgresDatabase + name: postgresdatabases.enterprise.splunk.com + version: v4 - description: SearchHeadCluster is the Schema for a Splunk Enterprise search head cluster displayName: Search Head Cluster diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 4ed7851d2..4da1352a1 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -22,13 +22,10 @@ resources: # default, aiding admins in cluster management. Those roles are # not used by the splunk-operator itself. You can comment the following lines # if you do not want those helpers be installed with your Project. 
-- postgrescluster_admin_role.yaml - postgrescluster_editor_role.yaml - postgrescluster_viewer_role.yaml -- postgresclusterclass_admin_role.yaml - postgresclusterclass_editor_role.yaml - postgresclusterclass_viewer_role.yaml -- postgresdatabase_admin_role.yaml - postgresdatabase_editor_role.yaml - postgresdatabase_viewer_role.yaml diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 4eb884742..b030de4db 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -13,7 +13,7 @@ resources: - enterprise_v4_searchheadcluster.yaml - enterprise_v4_clustermanager.yaml - enterprise_v4_licensemanager.yaml -- enterprise_v4_database.yaml -- enterprise_v4_databaseclass.yaml -- enterprise_v4_postgrescluster.yaml +- enterprise_v4_postgresdatabase.yaml +- enterprise_v4_postgresclusterclass_dev.yaml +- enterprise_v4_postgrescluster_default.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/internal/controller/postgrescluster_controller.go b/internal/controller/postgrescluster_controller.go index 970fe0348..dfa1f7eaf 100644 --- a/internal/controller/postgrescluster_controller.go +++ b/internal/controller/postgrescluster_controller.go @@ -18,49 +18,33 @@ package controller import ( "context" - "fmt" cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" enterprisev4 "github.com/splunk/splunk-operator/api/v4" + clustercore "github.com/splunk/splunk-operator/pkg/postgresql/cluster/core" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" - client "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" 
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" - logs "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" ) +const ( + ClusterTotalWorker int = 2 +) + // PostgresClusterReconciler reconciles PostgresCluster resources. type PostgresClusterReconciler struct { client.Client Scheme *runtime.Scheme } -// EffectiveClusterConfig holds the effective PostgresCluster spec and CNPG settings after class defaults are applied. -type EffectiveClusterConfig struct { - ClusterSpec *enterprisev4.PostgresClusterSpec - ProvisionerConfig *enterprisev4.CNPGConfig -} - -// normalizedManagedRole holds only the fields this controller sets on a CNPG RoleConfiguration. -// CNPG's admission webhook populates defaults (ConnectionLimit: -1, Inherit: true) that would -// cause equality.Semantic.DeepEqual to always report a diff — we compare only what we own. -type normalizedManagedRole struct { - Name string - Ensure cnpgv1.EnsureOption - Login bool - PasswordSecret string -} - // +kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresclusters,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresclusters/status,verbs=get;update;patch // +kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresclusters/finalizers,verbs=update @@ -70,1332 +54,11 @@ type normalizedManagedRole struct { // +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=poolers,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=poolers/status,verbs=get -// Reconcile drives PostgresCluster toward the desired CNPG resources and status. 
func (r *PostgresClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := logs.FromContext(ctx) - logger.Info("Reconciling PostgresCluster", "name", req.Name, "namespace", req.Namespace) - - var cnpgCluster *cnpgv1.Cluster - var poolerEnabled bool - var postgresSecretName string - secret := &corev1.Secret{} - - // Phase: ResourceFetch - postgresCluster := &enterprisev4.PostgresCluster{} - if getPGClusterErr := r.Get(ctx, req.NamespacedName, postgresCluster); getPGClusterErr != nil { - if apierrors.IsNotFound(getPGClusterErr) { - logger.Info("PostgresCluster deleted, skipping reconciliation") - return ctrl.Result{}, nil - } - logger.Error(getPGClusterErr, "Unable to fetch PostgresCluster") - return ctrl.Result{}, getPGClusterErr - } - persistedStatus := postgresCluster.Status.DeepCopy() - - if postgresCluster.Status.Resources == nil { - postgresCluster.Status.Resources = &enterprisev4.PostgresClusterResources{} - } - - // Keep condition and phase updates consistent across the reconcile flow. - updateStatus := func( - conditionType conditionTypes, - status metav1.ConditionStatus, - reason conditionReasons, - message string, - phase reconcileClusterPhases) { - r.updateStatus(postgresCluster, conditionType, status, reason, message, phase) - } - - // Phase: FinalizerHandling - // Handle deletion before any create or patch path so cleanup wins over reconciliation. 
- finalizerErr := r.handleFinalizer(ctx, postgresCluster, secret, cnpgCluster) - if finalizerErr != nil { - if apierrors.IsNotFound(finalizerErr) { - logger.Info("PostgresCluster already deleted, skipping finalizer update") - return ctrl.Result{}, nil - } - - logger.Error(finalizerErr, "Failed to handle finalizer") - updateStatus( - clusterReady, - metav1.ConditionFalse, - reasonClusterDeleteFailed, - fmt.Sprintf("Failed to delete resources during cleanup: %v", finalizerErr), - failedClusterPhase) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, finalizerErr - } - - if postgresCluster.GetDeletionTimestamp() != nil { - logger.Info("PostgresCluster is being deleted, cleanup complete") - return ctrl.Result{}, nil - } - - // Register the finalizer before creating managed resources. - if !controllerutil.ContainsFinalizer(postgresCluster, postgresClusterFinalizerName) { - controllerutil.AddFinalizer(postgresCluster, postgresClusterFinalizerName) - if updateErr := r.Update(ctx, postgresCluster); updateErr != nil { - if apierrors.IsConflict(updateErr) { - logger.Info("Conflict while adding finalizer, will retry on next reconcile") - return ctrl.Result{Requeue: true}, nil - } - logger.Error(updateErr, "Failed to add finalizer to PostgresCluster") - return ctrl.Result{}, updateErr - } - logger.Info("Finalizer added successfully") - return ctrl.Result{Requeue: true}, nil - } - - // Phase: ClassResolution - postgresClusterClass := &enterprisev4.PostgresClusterClass{} - if getClusterClassErr := r.Get(ctx, client.ObjectKey{Name: postgresCluster.Spec.Class}, postgresClusterClass); getClusterClassErr != nil { - logger.Error(getClusterClassErr, "Unable to fetch referenced PostgresClusterClass", "className", postgresCluster.Spec.Class) - updateStatus( - clusterReady, - metav1.ConditionFalse, - reasonClusterClassNotFound, - fmt.Sprintf("ClusterClass %s not found: %v", postgresCluster.Spec.Class, getClusterClassErr), - failedClusterPhase) - 
_ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, getClusterClassErr - } - - // Phase: ConfigurationMerging - // Merge PostgresCluster overrides on top of PostgresClusterClass defaults. - mergedConfig, mergeErr := r.getMergedConfig(postgresClusterClass, postgresCluster) - if mergeErr != nil { - logger.Error(mergeErr, "Failed to merge PostgresCluster configuration") - updateStatus( - clusterReady, - metav1.ConditionFalse, - reasonInvalidConfiguration, - fmt.Sprintf("Failed to merge configuration: %v", mergeErr), - failedClusterPhase) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, mergeErr - } - - // Phase: CredentialProvisioning - // The superuser secret must exist before the CNPG Cluster can be created or updated. - if postgresCluster.Status.Resources != nil && postgresCluster.Status.Resources.SecretRef != nil { - postgresSecretName = postgresCluster.Status.Resources.SecretRef.Name - logger.Info("Using existing secret from status", "name", postgresSecretName) - } else { - postgresSecretName = fmt.Sprintf("%s%s", postgresCluster.Name, defaultSecretSuffix) - logger.Info("Generating new secret name", "name", postgresSecretName) - } - postgresClusterSecretExists, secretExistErr := r.clusterSecretExists(ctx, postgresCluster.Namespace, postgresSecretName, secret) - if secretExistErr != nil { - logger.Error(secretExistErr, "Failed to check if PostgresCluster secret exists", "name", postgresSecretName) - updateStatus( - clusterReady, - metav1.ConditionFalse, - reasonUserSecretFailed, - fmt.Sprintf("Failed to check secret existence: %v", secretExistErr), - failedClusterPhase) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, secretExistErr - } - - if !postgresClusterSecretExists { - logger.Info("Creating PostgresCluster secret", "name", postgresSecretName) - if generateSecretErr := r.generateSecret(ctx, postgresCluster, postgresSecretName); 
generateSecretErr != nil { - logger.Error(generateSecretErr, "Failed to ensure PostgresCluster secret", "name", postgresSecretName) - updateStatus( - clusterReady, - metav1.ConditionFalse, - reasonUserSecretFailed, - fmt.Sprintf("Failed to generate PostgresCluster secret: %v", generateSecretErr), - failedClusterPhase) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, generateSecretErr - } - logger.Info("PostgresCluster secret created successfully", "name", postgresSecretName) - } - - // Re-link an existing secret if its owner reference was removed. - if postgresClusterSecretExists { - restoredSecretOwnerRef, restoreErr := r.restoreOwnerRef(ctx, postgresCluster, secret, "Secret") - if restoreErr != nil { - logger.Error(restoreErr, "Failed to restore owner reference on Secret") - updateStatus( - clusterReady, - metav1.ConditionFalse, - reasonSuperUserSecretFailed, - fmt.Sprintf("Failed to link existing secret: %v", restoreErr), - failedClusterPhase) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, restoreErr - } - if restoredSecretOwnerRef { - logger.Info("Existing secret linked successfully") - } - } - - if postgresCluster.Status.Resources == nil { - postgresCluster.Status.Resources = &enterprisev4.PostgresClusterResources{} - } - if postgresCluster.Status.Resources.SecretRef == nil { - postgresCluster.Status.Resources.SecretRef = &corev1.LocalObjectReference{Name: postgresSecretName} - return r.persistStatus(ctx, postgresCluster, persistedStatus) - } - - // Phase: ClusterSpecConstruction - desiredSpec := r.buildCNPGClusterSpec(mergedConfig, postgresSecretName) - - // Phase: ClusterReconciliation - // Create the CNPG Cluster on first reconcile, otherwise compare and patch drift. 
- existingCNPG := &cnpgv1.Cluster{} - getErr := r.Get(ctx, types.NamespacedName{Name: postgresCluster.Name, Namespace: postgresCluster.Namespace}, existingCNPG) - - if apierrors.IsNotFound(getErr) { - // CNPG Cluster doesn't exist yet. Create it and return so status can be observed on the next pass. - logger.Info("CNPG Cluster not found, creating", "name", postgresCluster.Name) - newCluster := r.buildCNPGCluster(postgresCluster, mergedConfig, postgresSecretName) - if createErr := r.Create(ctx, newCluster); createErr != nil { - logger.Error(createErr, "Failed to create CNPG Cluster") - updateStatus( - clusterReady, - metav1.ConditionFalse, - reasonClusterBuildFailed, - fmt.Sprintf("Failed to create CNPG Cluster: %v", createErr), - failedClusterPhase) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, createErr - } - - updateStatus( - clusterReady, - metav1.ConditionFalse, - reasonClusterBuildSucceeded, - "CNPG Cluster created", - provisioningClusterPhase) - if result, persistErr := r.persistStatus(ctx, postgresCluster, persistedStatus); persistErr != nil || result != (ctrl.Result{}) { - return result, persistErr - } - logger.Info("CNPG Cluster created successfully,", "name", postgresCluster.Name) - return ctrl.Result{}, nil - - } - if getErr != nil { - logger.Error(getErr, "Failed to get CNPG Cluster") - updateStatus( - clusterReady, - metav1.ConditionFalse, - reasonClusterGetFailed, - fmt.Sprintf("Failed to get CNPG Cluster: %v", getErr), - failedClusterPhase) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, getErr - } - - cnpgCluster = existingCNPG - // Re-link an existing CNPG Cluster if its owner reference was removed. 
- if restoredClusterOwnerRef, restoreErr := r.restoreOwnerRef(ctx, postgresCluster, cnpgCluster, "CNPGCluster"); restoreErr != nil { - logger.Error(restoreErr, "Failed to restore owner reference on CNPG Cluster") - updateStatus( - clusterReady, - metav1.ConditionFalse, - reasonClusterPatchFailed, - fmt.Sprintf("Failed to link existing CNPG Cluster: %v", restoreErr), - failedClusterPhase) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, restoreErr - } else if restoredClusterOwnerRef { - logger.Info("Existing CNPG Cluster linked successfully", "cluster", cnpgCluster.Name) - } - - // Patch the CNPG Cluster when the live spec differs from the desired spec. - currentNormalizedSpec := normalizeCNPGClusterSpec(cnpgCluster.Spec, mergedConfig.ClusterSpec.PostgreSQLConfig) - desiredNormalizedSpec := normalizeCNPGClusterSpec(desiredSpec, mergedConfig.ClusterSpec.PostgreSQLConfig) - - if !equality.Semantic.DeepEqual(currentNormalizedSpec, desiredNormalizedSpec) { - logger.Info("Detected drift in CNPG Cluster spec, patching", "name", cnpgCluster.Name) - originalCluster := cnpgCluster.DeepCopy() - cnpgCluster.Spec = desiredSpec - if patchErr := r.patchObject(ctx, originalCluster, cnpgCluster, "CNPGCluster"); patchErr != nil { - if apierrors.IsConflict(patchErr) { - logger.Info("Conflict occurred while updating CNPG Cluster, requeueing", "name", cnpgCluster.Name) - return ctrl.Result{Requeue: true}, nil - } - logger.Error(patchErr, "Failed to patch CNPG Cluster", "name", cnpgCluster.Name) - updateStatus( - clusterReady, - metav1.ConditionFalse, - reasonClusterPatchFailed, - fmt.Sprintf("Failed to patch CNPG Cluster: %v", patchErr), - failedClusterPhase) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, patchErr - } - logger.Info("CNPG Cluster patched successfully", "name", cnpgCluster.Name) - return ctrl.Result{}, nil - } - - // Phase: ManagedRoleReconciliation - if managedRolesErr := 
r.reconcileManagedRoles(ctx, postgresCluster, cnpgCluster); managedRolesErr != nil { - logger.Error(managedRolesErr, "Failed to reconcile managed roles") - updateStatus( - clusterReady, - metav1.ConditionFalse, - reasonManagedRolesFailed, - fmt.Sprintf("Failed to reconcile managed roles: %v", managedRolesErr), - failedClusterPhase) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, managedRolesErr - } - - // Phase: ClusterStatusProjection - // Project CNPG status before later phase-specific early returns so cluster status stays current. - clusterConditionStatus, clusterReason, clusterMessage, clusterPhase := r.syncStatus(postgresCluster, cnpgCluster) - - logger.Info( - "Mapped CNPG status to PostgresCluster", "cnpgPhase", - cnpgCluster.Status.Phase, "postgresClusterPhase", - clusterPhase, "conditionStatus", - clusterConditionStatus, "reason", - clusterReason, "message", - clusterMessage) - - updateStatus( - clusterReady, - clusterConditionStatus, - clusterReason, - clusterMessage, - clusterPhase, - ) - - // Phase: PoolerReconciliation - poolerEnabled = mergedConfig.ClusterSpec.ConnectionPoolerEnabled != nil && *mergedConfig.ClusterSpec.ConnectionPoolerEnabled - if poolerEnabled { - if mergedConfig.ProvisionerConfig.ConnectionPooler == nil { - logger.Info("Connection pooler enabled but no config found in class or cluster spec", - "class", postgresCluster.Spec.Class, - "cluster", postgresCluster.Name, - ) - updateStatus( - poolerReady, - metav1.ConditionFalse, - reasonPoolerConfigMissing, - fmt.Sprintf("Connection pooler is enabled but no config found in class %q or cluster %q", - postgresCluster.Spec.Class, postgresCluster.Name), - failedClusterPhase, - ) - return r.persistStatus(ctx, postgresCluster, persistedStatus) - } - if createPoolerErr := r.createConnectionPoolers(ctx, postgresCluster, mergedConfig, cnpgCluster); createPoolerErr != nil { - logger.Error(createPoolerErr, "Failed to create connection poolers") - 
updateStatus( - poolerReady, - metav1.ConditionFalse, - reasonPoolerReconciliationFailed, - fmt.Sprintf("Failed to create connection poolers: %v", createPoolerErr), - failedClusterPhase, - ) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, createPoolerErr - } - if !r.arePoolersReady(ctx, postgresCluster) { - logger.Info("Connection poolers are not ready yet, requeueing") - updateStatus( - poolerReady, - metav1.ConditionFalse, - reasonPoolerCreating, - "Connection poolers are being provisioned", - provisioningClusterPhase, - ) - if result, persistErr := r.persistStatus(ctx, postgresCluster, persistedStatus); persistErr != nil || result != (ctrl.Result{}) { - return result, persistErr - } - return ctrl.Result{RequeueAfter: retryDelay}, nil - } - - message, err := r.syncPoolerStatus(ctx, postgresCluster) - if err != nil { - updateStatus( - poolerReady, - metav1.ConditionFalse, - reasonPoolerReconciliationFailed, - fmt.Sprintf("Failed to sync pooler status: %v", err), - failedClusterPhase, - ) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, err - } - - updateStatus( - poolerReady, - metav1.ConditionTrue, - reasonAllInstancesReady, - fmt.Sprintf("All connection poolers are ready: %s", message), - clusterPhase, - ) - } else { - if err := r.deleteConnectionPoolers(ctx, postgresCluster); err != nil { - logger.Error(err, "Failed to delete connection poolers") - updateStatus( - poolerReady, - metav1.ConditionFalse, - reasonPoolerReconciliationFailed, - "Failed to delete connection poolers", - failedClusterPhase, - ) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, err - } - if r.poolerExists(ctx, postgresCluster, readWriteEndpoint) || r.poolerExists(ctx, postgresCluster, readOnlyEndpoint) { - updateStatus( - poolerReady, - metav1.ConditionFalse, - reasonPoolerCreating, - "Connection poolers are being deleted", - provisioningClusterPhase, - ) 
- if result, persistErr := r.persistStatus(ctx, postgresCluster, persistedStatus); persistErr != nil || result != (ctrl.Result{}) { - return result, persistErr - } - return ctrl.Result{RequeueAfter: retryDelay}, nil - } - postgresCluster.Status.ConnectionPoolerStatus = nil - meta.RemoveStatusCondition(&postgresCluster.Status.Conditions, string(poolerReady)) - } - - // Phase: ConnectionMetadata - // Publish connection details after the cluster and optional poolers reach the desired state. - desiredConfigMap, err := r.generateConfigMap(postgresCluster, postgresSecretName, poolerEnabled) - if err != nil { - logger.Error(err, "Failed to generate ConfigMap") - updateStatus( - configMapReady, - metav1.ConditionFalse, - reasonConfigMapFailed, - fmt.Sprintf("Failed to generate ConfigMap: %v", err), - failedClusterPhase, - ) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, err - } - - configMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: desiredConfigMap.Name, - Namespace: desiredConfigMap.Namespace, - }, - } - - createOrUpdateResult, err := controllerutil.CreateOrUpdate(ctx, r.Client, configMap, func() error { - configMap.Data = desiredConfigMap.Data - configMap.Labels = desiredConfigMap.Labels - configMap.Annotations = desiredConfigMap.Annotations - - if !metav1.IsControlledBy(configMap, postgresCluster) { - if err := ctrl.SetControllerReference(postgresCluster, configMap, r.Scheme); err != nil { - return fmt.Errorf("set controller reference failed: %w", err) - } - } - return nil - }) - if err != nil { - logger.Error(err, "Failed to reconcile ConfigMap", "name", desiredConfigMap.Name) - updateStatus( - configMapReady, - metav1.ConditionFalse, - reasonConfigMapFailed, - fmt.Sprintf("Failed to reconcile ConfigMap: %v", err), - failedClusterPhase, - ) - _ = r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus) - return ctrl.Result{}, err - } - - switch createOrUpdateResult { - case 
controllerutil.OperationResultCreated: - logger.Info("ConfigMap created", "name", desiredConfigMap.Name) - case controllerutil.OperationResultUpdated: - logger.Info("ConfigMap updated", "name", desiredConfigMap.Name) - case controllerutil.OperationResultNone: - logger.Info("ConfigMap unchanged", "name", desiredConfigMap.Name) - } - - if postgresCluster.Status.Resources.ConfigMapRef == nil || - postgresCluster.Status.Resources.ConfigMapRef.Name != desiredConfigMap.Name { - postgresCluster.Status.Resources.ConfigMapRef = &corev1.LocalObjectReference{Name: desiredConfigMap.Name} - logger.Info("ConfigMap reference updated in status", "configMap", desiredConfigMap.Name) - } - // Phase: ReadyStatus - // Persist the final ConfigMap status update and finish the reconcile pass. - updateStatus( - configMapReady, - metav1.ConditionTrue, - reasonConfigMapsCreated, - fmt.Sprintf("ConfigMap is ready: %s", desiredConfigMap.Name), - clusterPhase, - ) - if result, persistErr := r.persistStatus(ctx, postgresCluster, persistedStatus); persistErr != nil || result != (ctrl.Result{}) { - return result, persistErr - } - logger.Info("Reconciliation complete") - return ctrl.Result{}, nil -} - -// getMergedConfig applies PostgresClusterClass defaults and validates the required resulting fields. 
-func (r *PostgresClusterReconciler) getMergedConfig(clusterClass *enterprisev4.PostgresClusterClass, cluster *enterprisev4.PostgresCluster) (*EffectiveClusterConfig, error) { - resultConfig := cluster.Spec.DeepCopy() - classDefaults := clusterClass.Spec.Config - - if resultConfig.Instances == nil { - resultConfig.Instances = classDefaults.Instances - } - if resultConfig.PostgresVersion == nil { - resultConfig.PostgresVersion = classDefaults.PostgresVersion - } - if resultConfig.Resources == nil { - resultConfig.Resources = classDefaults.Resources - } - if resultConfig.Storage == nil { - resultConfig.Storage = classDefaults.Storage - } - if len(resultConfig.PostgreSQLConfig) == 0 { - resultConfig.PostgreSQLConfig = classDefaults.PostgreSQLConfig - } - if len(resultConfig.PgHBA) == 0 { - resultConfig.PgHBA = classDefaults.PgHBA - } - - if resultConfig.Instances == nil || resultConfig.PostgresVersion == nil || resultConfig.Storage == nil { - return nil, fmt.Errorf("invalid configuration for class %s: instances, postgresVersion and storage are required", clusterClass.Name) - } - - if resultConfig.PostgreSQLConfig == nil { - resultConfig.PostgreSQLConfig = make(map[string]string) - } - if resultConfig.PgHBA == nil { - resultConfig.PgHBA = make([]string, 0) - } - if resultConfig.Resources == nil { - resultConfig.Resources = &corev1.ResourceRequirements{} - } - - return &EffectiveClusterConfig{ - ClusterSpec: resultConfig, - ProvisionerConfig: clusterClass.Spec.CNPG, - }, nil -} - -// buildCNPGClusterSpec builds the desired CNPG ClusterSpec from the merged configuration. -// IMPORTANT: any field added here must also be added to normalizedCNPGClusterSpec and normalizeCNPGClusterSpec, -// otherwise it will not be included in drift detection and changes will be silently ignored. -func (r *PostgresClusterReconciler) buildCNPGClusterSpec(mergedConfig *EffectiveClusterConfig, secretName string) cnpgv1.ClusterSpec { - - // 3. 
Build the Spec - spec := cnpgv1.ClusterSpec{ - ImageName: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s", *mergedConfig.ClusterSpec.PostgresVersion), - Instances: int(*mergedConfig.ClusterSpec.Instances), - PostgresConfiguration: cnpgv1.PostgresConfiguration{ - Parameters: mergedConfig.ClusterSpec.PostgreSQLConfig, - PgHBA: mergedConfig.ClusterSpec.PgHBA, - }, - SuperuserSecret: &cnpgv1.LocalObjectReference{ - Name: secretName, - }, - EnableSuperuserAccess: ptr.To(true), - - Bootstrap: &cnpgv1.BootstrapConfiguration{ - InitDB: &cnpgv1.BootstrapInitDB{ - Database: defaultDatabaseName, - Owner: superUsername, - Secret: &cnpgv1.LocalObjectReference{ - Name: secretName, - }, - }, - }, - StorageConfiguration: cnpgv1.StorageConfiguration{ - Size: mergedConfig.ClusterSpec.Storage.String(), - }, - Resources: *mergedConfig.ClusterSpec.Resources, - } - - return spec -} - -// buildCNPGCluster builds the CNPG Cluster object for the merged PostgresCluster configuration. -func (r *PostgresClusterReconciler) buildCNPGCluster( - postgresCluster *enterprisev4.PostgresCluster, - mergedConfig *EffectiveClusterConfig, - secretName string, -) *cnpgv1.Cluster { - cnpgCluster := &cnpgv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: postgresCluster.Name, - Namespace: postgresCluster.Namespace, - }, - Spec: r.buildCNPGClusterSpec(mergedConfig, secretName), - } - ctrl.SetControllerReference(postgresCluster, cnpgCluster, r.Scheme) - return cnpgCluster -} - -// poolerResourceName returns the CNPG Pooler resource name for a given cluster and type (rw/ro). -func poolerResourceName(clusterName, poolerType string) string { - return fmt.Sprintf("%s%s%s", clusterName, defaultPoolerSuffix, poolerType) -} - -// createConnectionPoolers ensures both RW and RO CNPG Pooler resources exist by creating missing poolers. 
-func (r *PostgresClusterReconciler) createConnectionPoolers( - ctx context.Context, - postgresCluster *enterprisev4.PostgresCluster, - mergedConfig *EffectiveClusterConfig, - cnpgCluster *cnpgv1.Cluster, -) error { - // Ensure the RW pooler exists. - if err := r.createConnectionPooler(ctx, postgresCluster, mergedConfig, cnpgCluster, readWriteEndpoint); err != nil { - return fmt.Errorf("failed to reconcile RW pooler: %w", err) - } - - // Ensure the RO pooler exists. - if err := r.createConnectionPooler(ctx, postgresCluster, mergedConfig, cnpgCluster, readOnlyEndpoint); err != nil { - return fmt.Errorf("failed to reconcile RO pooler: %w", err) - } - - return nil -} - -// poolerExists reports whether the named pooler resource exists. -func (r *PostgresClusterReconciler) poolerExists(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster, poolerType string) bool { - pooler := &cnpgv1.Pooler{} - err := r.Get(ctx, types.NamespacedName{ - Name: poolerResourceName(postgresCluster.Name, poolerType), - Namespace: postgresCluster.Namespace, - }, pooler) - - if apierrors.IsNotFound(err) { - return false - } - if err != nil { - logs.FromContext(ctx).Error(err, "Failed to check pooler existence", "type", poolerType) - return false - } - return true -} - -// deleteConnectionPoolers removes RW and RO pooler resources if they exist. 
-func (r *PostgresClusterReconciler) deleteConnectionPoolers(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster) error { - logger := logs.FromContext(ctx) - - for _, poolerType := range []string{readWriteEndpoint, readOnlyEndpoint} { - poolerName := poolerResourceName(postgresCluster.Name, poolerType) - exists := r.poolerExists(ctx, postgresCluster, poolerType) - if !exists { - continue - } - - pooler := &cnpgv1.Pooler{} - if err := r.Get(ctx, types.NamespacedName{ - Name: poolerName, - Namespace: postgresCluster.Namespace, - }, pooler); err != nil { - if apierrors.IsNotFound(err) { - continue - } - return fmt.Errorf("failed to get pooler %s: %w", poolerName, err) - } - - logger.Info("Deleting CNPG Pooler", "name", poolerName) - if err := r.Delete(ctx, pooler); err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("failed to delete pooler %s: %w", poolerName, err) - } - } - return nil -} - -// createConnectionPooler creates a CNPG Pooler resource when it is missing. -// Existing poolers are left unchanged by design. -func (r *PostgresClusterReconciler) createConnectionPooler( - ctx context.Context, - postgresCluster *enterprisev4.PostgresCluster, - mergedConfig *EffectiveClusterConfig, - cnpgCluster *cnpgv1.Cluster, - poolerType string, -) error { - poolerName := poolerResourceName(postgresCluster.Name, poolerType) - - existingPooler := &cnpgv1.Pooler{} - err := r.Get(ctx, types.NamespacedName{ - Name: poolerName, - Namespace: postgresCluster.Namespace, - }, existingPooler) - - if apierrors.IsNotFound(err) { - logs.FromContext(ctx).Info("Creating CNPG Pooler", "name", poolerName, "type", poolerType) - pooler := r.buildCNPGPooler(postgresCluster, mergedConfig, cnpgCluster, poolerType) - return r.Create(ctx, pooler) - } - - return err -} - -// buildCNPGPooler builds the desired CNPG Pooler object for the given pooler type. 
-func (r *PostgresClusterReconciler) buildCNPGPooler( - postgresCluster *enterprisev4.PostgresCluster, - mergedConfig *EffectiveClusterConfig, - cnpgCluster *cnpgv1.Cluster, - poolerType string, -) *cnpgv1.Pooler { - cfg := mergedConfig.ProvisionerConfig.ConnectionPooler - poolerName := poolerResourceName(postgresCluster.Name, poolerType) - - instances := *cfg.Instances - mode := cnpgv1.PgBouncerPoolMode(*cfg.Mode) - - pooler := &cnpgv1.Pooler{ - ObjectMeta: metav1.ObjectMeta{ - Name: poolerName, - Namespace: postgresCluster.Namespace, - }, - Spec: cnpgv1.PoolerSpec{ - Cluster: cnpgv1.LocalObjectReference{ - Name: cnpgCluster.Name, - }, - Instances: &instances, - Type: cnpgv1.PoolerType(poolerType), - PgBouncer: &cnpgv1.PgBouncerSpec{ - PoolMode: mode, - Parameters: cfg.Config, - }, - }, - } - - ctrl.SetControllerReference(postgresCluster, pooler, r.Scheme) - return pooler -} - -// syncStatus maps CNPG Cluster state onto PostgresCluster status and refreshes ProvisionerRef. -func (r *PostgresClusterReconciler) syncStatus( - postgresCluster *enterprisev4.PostgresCluster, - cnpgCluster *cnpgv1.Cluster, -) (metav1.ConditionStatus, conditionReasons, string, reconcileClusterPhases) { - postgresCluster.Status.ProvisionerRef = &corev1.ObjectReference{ - APIVersion: "postgresql.cnpg.io/v1", - Kind: "Cluster", - Namespace: cnpgCluster.Namespace, - Name: cnpgCluster.Name, - UID: cnpgCluster.UID, - } - - // Map CNPG Phase to PostgresCluster Phase/Conditions - var clusterPhase reconcileClusterPhases - var conditionStatus metav1.ConditionStatus - var reason conditionReasons - var message string - - switch cnpgCluster.Status.Phase { - case cnpgv1.PhaseHealthy: - clusterPhase = readyClusterPhase - conditionStatus = metav1.ConditionTrue - reason = reasonCNPGClusterHealthy - message = "Cluster is up and running" - - case cnpgv1.PhaseFirstPrimary, - cnpgv1.PhaseCreatingReplica, - cnpgv1.PhaseWaitingForInstancesToBeActive: - clusterPhase = provisioningClusterPhase - conditionStatus = 
metav1.ConditionFalse - reason = reasonCNPGProvisioning - message = fmt.Sprintf("CNPG cluster provisioning: %s", cnpgCluster.Status.Phase) - - case cnpgv1.PhaseSwitchover: - clusterPhase = configuringClusterPhase - conditionStatus = metav1.ConditionFalse - reason = reasonCNPGSwitchover - message = "Cluster changing primary node" - - case cnpgv1.PhaseFailOver: - clusterPhase = configuringClusterPhase - conditionStatus = metav1.ConditionFalse - reason = reasonCNPGFailingOver - message = "Pod missing, need to change primary" - - case cnpgv1.PhaseInplacePrimaryRestart, - cnpgv1.PhaseInplaceDeletePrimaryRestart: - clusterPhase = configuringClusterPhase - conditionStatus = metav1.ConditionFalse - reason = reasonCNPGRestarting - message = fmt.Sprintf("CNPG cluster restarting: %s", cnpgCluster.Status.Phase) - - case cnpgv1.PhaseUpgrade, - cnpgv1.PhaseMajorUpgrade, - cnpgv1.PhaseUpgradeDelayed, - cnpgv1.PhaseOnlineUpgrading: - clusterPhase = configuringClusterPhase - conditionStatus = metav1.ConditionFalse - reason = reasonCNPGUpgrading - message = fmt.Sprintf("CNPG cluster upgrading: %s", cnpgCluster.Status.Phase) - - case cnpgv1.PhaseApplyingConfiguration: - clusterPhase = configuringClusterPhase - conditionStatus = metav1.ConditionFalse - reason = reasonCNPGApplyingConfig - message = "Configuration change is being applied" - - case cnpgv1.PhaseReplicaClusterPromotion: - clusterPhase = configuringClusterPhase - conditionStatus = metav1.ConditionFalse - reason = reasonCNPGPromoting - message = "Replica is being promoted to primary" - - case cnpgv1.PhaseWaitingForUser: - clusterPhase = failedClusterPhase - conditionStatus = metav1.ConditionFalse - reason = reasonCNPGWaitingForUser - message = "Action from the user is required" - - case cnpgv1.PhaseUnrecoverable: - clusterPhase = failedClusterPhase - conditionStatus = metav1.ConditionFalse - reason = reasonCNPGUnrecoverable - message = "Cluster failed, needs manual intervention" - - case 
cnpgv1.PhaseCannotCreateClusterObjects: - clusterPhase = failedClusterPhase - conditionStatus = metav1.ConditionFalse - reason = reasonCNPGProvisioningFailed - message = "Cluster resources cannot be created" - - case cnpgv1.PhaseUnknownPlugin, - cnpgv1.PhaseFailurePlugin: - clusterPhase = failedClusterPhase - conditionStatus = metav1.ConditionFalse - reason = reasonCNPGPluginError - message = fmt.Sprintf("CNPG plugin error: %s", cnpgCluster.Status.Phase) - - case cnpgv1.PhaseImageCatalogError, - cnpgv1.PhaseArchitectureBinaryMissing: - clusterPhase = failedClusterPhase - conditionStatus = metav1.ConditionFalse - reason = reasonCNPGImageError - message = fmt.Sprintf("CNPG image error: %s", cnpgCluster.Status.Phase) - - case "": - clusterPhase = pendingClusterPhase - conditionStatus = metav1.ConditionFalse - reason = reasonCNPGProvisioning - message = "CNPG cluster is pending creation" - - default: - clusterPhase = provisioningClusterPhase - conditionStatus = metav1.ConditionFalse - reason = reasonCNPGProvisioning - message = fmt.Sprintf("CNPG cluster clusterPhase: %s", cnpgCluster.Status.Phase) - } - return conditionStatus, reason, message, clusterPhase - -} - -// updateStatus is a convenience wrapper that updates a condition and the phase together. -// For cases where you need to update multiple conditions before persisting, use updateCondition instead. -func (r *PostgresClusterReconciler) updateStatus( - postgresCluster *enterprisev4.PostgresCluster, - conditionType conditionTypes, - status metav1.ConditionStatus, - reason conditionReasons, - message string, - phase reconcileClusterPhases, -) { - r.updateCondition(postgresCluster, conditionType, status, reason, message) - postgresCluster.Status.Phase = string(phase) -} - -// updateCondition updates a single status condition in memory without persisting. -// Call persistStatus after updating all desired conditions. 
-func (r *PostgresClusterReconciler) updateCondition( - postgresCluster *enterprisev4.PostgresCluster, - conditionType conditionTypes, - status metav1.ConditionStatus, - reason conditionReasons, - message string, -) { - meta.SetStatusCondition(&postgresCluster.Status.Conditions, metav1.Condition{ - Type: string(conditionType), - Status: status, - Reason: string(reason), - Message: message, - ObservedGeneration: postgresCluster.Generation, - }) -} - -// persistStatus persists status changes and converts update conflicts into reconcile retries. -func (r *PostgresClusterReconciler) persistStatus( - ctx context.Context, - postgresCluster *enterprisev4.PostgresCluster, - persistedStatus *enterprisev4.PostgresClusterStatus, -) (ctrl.Result, error) { - if persistErr := r.persistStatusIfChanged(ctx, postgresCluster, persistedStatus); persistErr != nil { - if apierrors.IsConflict(persistErr) { - logs.FromContext(ctx).Info("Conflict while updating status, will retry on next reconcile") - return ctrl.Result{Requeue: true}, nil - } - return ctrl.Result{}, persistErr - } - return ctrl.Result{}, nil + return clustercore.PostgresClusterService(ctx, r.Client, r.Scheme, req) } -// persistStatusIfChanged updates status only when it differs from the last persisted snapshot. -func (r *PostgresClusterReconciler) persistStatusIfChanged(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster, lastPostgresClusterStatus *enterprisev4.PostgresClusterStatus) error { - if !equality.Semantic.DeepEqual(postgresCluster.Status, *lastPostgresClusterStatus) { - if err := r.Status().Update(ctx, postgresCluster); err != nil { - return err - } - *lastPostgresClusterStatus = *postgresCluster.Status.DeepCopy() - } - return nil -} - -// syncPoolerStatus populates ConnectionPoolerStatus and returns a summary message. -// Callers are responsible for updating PoolerReady after this succeeds. 
-func (r *PostgresClusterReconciler) syncPoolerStatus(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster) (string, error) { - rwPooler := &cnpgv1.Pooler{} - if rwErr := r.Get(ctx, types.NamespacedName{ - Name: poolerResourceName(postgresCluster.Name, readWriteEndpoint), - Namespace: postgresCluster.Namespace, - }, rwPooler); rwErr != nil { - return "", rwErr - } - - roPooler := &cnpgv1.Pooler{} - if roErr := r.Get(ctx, types.NamespacedName{ - Name: poolerResourceName(postgresCluster.Name, readOnlyEndpoint), - Namespace: postgresCluster.Namespace, - }, roPooler); roErr != nil { - return "", roErr - } - - postgresCluster.Status.ConnectionPoolerStatus = &enterprisev4.ConnectionPoolerStatus{ - Enabled: true, - } - - rwDesired, rwScheduled := r.getPoolerInstanceCount(rwPooler) - roDesired, roScheduled := r.getPoolerInstanceCount(roPooler) - - return fmt.Sprintf("%s: %d/%d, %s: %d/%d", - readWriteEndpoint, rwScheduled, rwDesired, - readOnlyEndpoint, roScheduled, roDesired, - ), nil -} - -// isPoolerReady checks if a pooler has all instances scheduled. -// Note: CNPG PoolerStatus only tracks scheduled instances, not ready pods. -func (r *PostgresClusterReconciler) isPoolerReady(pooler *cnpgv1.Pooler, err error) bool { - if err != nil { - return false - } - desiredInstances := int32(1) - if pooler.Spec.Instances != nil { - desiredInstances = *pooler.Spec.Instances - } - return pooler.Status.Instances >= desiredInstances -} - -// getPoolerInstanceCount returns the number of scheduled instances for a pooler. -func (r *PostgresClusterReconciler) getPoolerInstanceCount(pooler *cnpgv1.Pooler) (desired int32, scheduled int32) { - desired = int32(1) - if pooler.Spec.Instances != nil { - desired = *pooler.Spec.Instances - } - return desired, pooler.Status.Instances -} - -// arePoolersReady checks if both RW and RO poolers have all instances scheduled. 
-func (r *PostgresClusterReconciler) arePoolersReady(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster) bool { - rwPooler := &cnpgv1.Pooler{} - rwErr := r.Get(ctx, types.NamespacedName{ - Name: poolerResourceName(postgresCluster.Name, readWriteEndpoint), - Namespace: postgresCluster.Namespace, - }, rwPooler) - - roPooler := &cnpgv1.Pooler{} - roErr := r.Get(ctx, types.NamespacedName{ - Name: poolerResourceName(postgresCluster.Name, readOnlyEndpoint), - Namespace: postgresCluster.Namespace, - }, roPooler) - - return r.isPoolerReady(rwPooler, rwErr) && r.isPoolerReady(roPooler, roErr) -} - -// normalizeManagedRole projects a CNPG RoleConfiguration down to only the fields this controller controls. -// CNPG's admission webhook populates defaults on the live object (ConnectionLimit: -1, Inherit: true) -// that are absent from our desired slice — normalizing both sides before comparison prevents a -// permanent diff that would re-patch on every reconcile. -func normalizeManagedRole(r cnpgv1.RoleConfiguration) normalizedManagedRole { - secret := "" - if r.PasswordSecret != nil { - secret = r.PasswordSecret.Name - } - return normalizedManagedRole{ - Name: r.Name, - Ensure: r.Ensure, - Login: r.Login, - PasswordSecret: secret, - } -} - -// normalizeManagedRoles applies normalizeManagedRole to each RoleConfiguration in the slice. -func normalizeManagedRoles(roles []cnpgv1.RoleConfiguration) []normalizedManagedRole { - result := make([]normalizedManagedRole, 0, len(roles)) - for _, r := range roles { - result = append(result, normalizeManagedRole(r)) - } - return result -} - -// buildCNPGRole converts a single PostgresCluster ManagedRole to its CNPG RoleConfiguration equivalent. -// Absent roles are marked for removal; Login is only meaningful for present roles. 
-func buildCNPGRole(role enterprisev4.ManagedRole) cnpgv1.RoleConfiguration { - cnpgRole := cnpgv1.RoleConfiguration{ - Name: role.Name, - } - if role.Ensure == "absent" { - cnpgRole.Ensure = cnpgv1.EnsureAbsent - } else { - cnpgRole.Ensure = cnpgv1.EnsurePresent - cnpgRole.Login = true - } - if role.PasswordSecretRef != nil { - cnpgRole.PasswordSecret = &cnpgv1.LocalObjectReference{Name: role.PasswordSecretRef.Name} - } - return cnpgRole -} - -// reconcileManagedRoles synchronizes ManagedRoles from PostgresCluster spec to CNPG Cluster managed.roles. -func (r *PostgresClusterReconciler) reconcileManagedRoles(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster, cnpgCluster *cnpgv1.Cluster) error { - logger := logs.FromContext(ctx) - - desired := make([]cnpgv1.RoleConfiguration, 0, len(postgresCluster.Spec.ManagedRoles)) - for _, role := range postgresCluster.Spec.ManagedRoles { - desired = append(desired, buildCNPGRole(role)) - } - - var current []cnpgv1.RoleConfiguration - if cnpgCluster.Spec.Managed != nil { - current = cnpgCluster.Spec.Managed.Roles - } - - if equality.Semantic.DeepEqual(normalizeManagedRoles(current), normalizeManagedRoles(desired)) { - logger.Info("CNPG Cluster roles already match desired state, no update needed") - return nil - } - - logger.Info("Detected drift in managed roles, patching", "count", len(desired)) - originalCluster := cnpgCluster.DeepCopy() - if cnpgCluster.Spec.Managed == nil { - cnpgCluster.Spec.Managed = &cnpgv1.ManagedConfiguration{} - } - cnpgCluster.Spec.Managed.Roles = desired - - if err := r.patchObject(ctx, originalCluster, cnpgCluster, "CNPGCluster"); err != nil { - return fmt.Errorf("patching managed roles: %w", err) - } - - logger.Info("Successfully updated managed roles", "count", len(desired)) - return nil -} - -// normalizedCNPGClusterSpec is a subset of cnpgv1.ClusterSpec fields that we care about for drift detection. 
-// Any field that is included in buildCNPGClusterSpec and should be considered for drift detection must be added here, and populated in normalizeCNPGClusterSpec. -func normalizeCNPGClusterSpec(spec cnpgv1.ClusterSpec, customDefinedParameters map[string]string) normalizedCNPGClusterSpec { - normalizedConf := normalizedCNPGClusterSpec{ - ImageName: spec.ImageName, - Instances: spec.Instances, - // Parameters intentionally excluded — CNPG injects defaults that we don't change - StorageSize: spec.StorageConfiguration.Size, - Resources: spec.Resources, - } - - if len(customDefinedParameters) > 0 { - normalizedConf.CustomDefinedParameters = make(map[string]string) - for k := range customDefinedParameters { - normalizedConf.CustomDefinedParameters[k] = spec.PostgresConfiguration.Parameters[k] - } - } - if len(spec.PostgresConfiguration.PgHBA) > 0 { - normalizedConf.PgHBA = spec.PostgresConfiguration.PgHBA - } - - if spec.Bootstrap != nil && spec.Bootstrap.InitDB != nil { - normalizedConf.DefaultDatabase = spec.Bootstrap.InitDB.Database - normalizedConf.Owner = spec.Bootstrap.InitDB.Owner - } - return normalizedConf -} - -// generateConfigMap builds the desired ConfigMap with connection details for the PostgresCluster. 
-func (r *PostgresClusterReconciler) generateConfigMap( - postgresCluster *enterprisev4.PostgresCluster, - secretName string, - poolerEnabled bool, -) (*corev1.ConfigMap, error) { - configMapName := fmt.Sprintf("%s%s", postgresCluster.Name, defaultConfigMapSuffix) - if postgresCluster.Status.Resources != nil && postgresCluster.Status.Resources.ConfigMapRef != nil { - configMapName = postgresCluster.Status.Resources.ConfigMapRef.Name - } - - data := map[string]string{ - "CLUSTER_RW_ENDPOINT": fmt.Sprintf("%s-rw.%s", postgresCluster.Name, postgresCluster.Namespace), - "CLUSTER_RO_ENDPOINT": fmt.Sprintf("%s-ro.%s", postgresCluster.Name, postgresCluster.Namespace), - "CLUSTER_R_ENDPOINT": fmt.Sprintf("%s-r.%s", postgresCluster.Name, postgresCluster.Namespace), - "DEFAULT_CLUSTER_PORT": defaultPort, - "SUPER_USER_NAME": superUsername, - "SUPER_USER_SECRET_REF": secretName, - } - - if poolerEnabled { - data["CLUSTER_POOLER_RW_ENDPOINT"] = fmt.Sprintf("%s.%s", poolerResourceName(postgresCluster.Name, readWriteEndpoint), postgresCluster.Namespace) - data["CLUSTER_POOLER_RO_ENDPOINT"] = fmt.Sprintf("%s.%s", poolerResourceName(postgresCluster.Name, readOnlyEndpoint), postgresCluster.Namespace) - } - - configMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: configMapName, - Namespace: postgresCluster.Namespace, - Labels: map[string]string{"app.kubernetes.io/managed-by": "postgrescluster-controller"}, - }, - Data: data, - } - if err := ctrl.SetControllerReference(postgresCluster, configMap, r.Scheme); err != nil { - return nil, fmt.Errorf("failed to set controller reference: %w", err) - } - return configMap, nil -} - -// generateSecret creates the superuser Secret when it is missing. 
-func (r *PostgresClusterReconciler) generateSecret(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster, secretName string) error { - existing := &corev1.Secret{} - err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: postgresCluster.Namespace}, existing) - - // If secret does not exist, create it - if apierrors.IsNotFound(err) { - password, err := generatePassword() - if err != nil { - return err - } - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: postgresCluster.Namespace, - }, - StringData: map[string]string{ - "username": superUsername, - "password": password, - }, - Type: corev1.SecretTypeOpaque, - } - // Set owner reference - if err := ctrl.SetControllerReference(postgresCluster, secret, r.Scheme); err != nil { - return err - } - if err := r.Create(ctx, secret); err != nil { - return err - } - } else if err != nil { - return err - } - return nil -} - -// deleteCNPGCluster deletes the CNPG Cluster resource if it exists. -func (r *PostgresClusterReconciler) deleteCNPGCluster(ctx context.Context, cnpgCluster *cnpgv1.Cluster) error { - logger := logs.FromContext(ctx) - // TODO: add logic to decide to delete cluster if one has customer DBs configured, to prevent data loss - if cnpgCluster == nil { - logger.Info("CNPG Cluster not found, skipping deletion") - return nil - } - logger.Info("Deleting CNPG Cluster", "name", cnpgCluster.Name) - if err := r.Delete(ctx, cnpgCluster); err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("failed to delete CNPG Cluster: %w", err) - } - return nil -} - -// handleFinalizer performs deletion-time cleanup and removes the finalizer when cleanup succeeds. 
-func (r *PostgresClusterReconciler) handleFinalizer(ctx context.Context, postgresCluster *enterprisev4.PostgresCluster, secret *corev1.Secret, cnpgCluster *cnpgv1.Cluster) error { - logger := logs.FromContext(ctx) - if postgresCluster.GetDeletionTimestamp() == nil { - logger.Info("PostgresCluster not marked for deletion, skipping finalizer logic") - return nil - } - if !controllerutil.ContainsFinalizer(postgresCluster, postgresClusterFinalizerName) { - logger.Info("Finalizer not present on PostgresCluster, skipping finalizer logic") - return nil - } - if cnpgCluster == nil { - cnpgCluster = &cnpgv1.Cluster{} - } - - err := r.Get(ctx, types.NamespacedName{ - Name: postgresCluster.Name, - Namespace: postgresCluster.Namespace, - }, cnpgCluster) - if err != nil { - if apierrors.IsNotFound(err) { - cnpgCluster = nil - logger.Info("CNPG cluster not found during cleanup") - } else { - return fmt.Errorf("failed to fetch CNPG cluster during cleanup: %w", err) - } - } - logger.Info("Processing finalizer cleanup for PostgresCluster") - - // Always delete connection poolers if they exist. 
- if err := r.deleteConnectionPoolers(ctx, postgresCluster); err != nil { - logger.Error(err, "Failed to delete connection poolers during cleanup") - return fmt.Errorf("failed to delete connection poolers: %w", err) - } - - switch postgresCluster.Spec.ClusterDeletionPolicy { - case clusterDeletionPolicyDelete: - logger.Info("ClusterDeletionPolicy is 'Delete', proceeding to delete CNPG Cluster and associated resources") - if cnpgCluster != nil { - if err := r.deleteCNPGCluster(ctx, cnpgCluster); err != nil { - logger.Error(err, "Failed to delete CNPG Cluster during finalizer cleanup") - return fmt.Errorf("failed to delete CNPG Cluster during finalizer cleanup: %w", err) - } - } - logger.Info("CNPG Cluster not found") - case clusterDeletionPolicyRetain: - logger.Info("ClusterDeletionPolicy is 'Retain', proceeding to remove owner references and retain CNPG Cluster") - // Remove owner reference from CNPG Cluster to prevent its deletion. - if cnpgCluster != nil { - originalCNPG := cnpgCluster.DeepCopy() - refRemoved, err := r.removeOwnerRef(postgresCluster, cnpgCluster, "CNPGCluster") - if err != nil { - return fmt.Errorf("failed to remove owner reference from CNPG cluster: %w", err) - } - if !refRemoved { - logger.Info("Owner reference already removed/not set from CNPG Cluster, skipping patch") - } - if err := r.patchObject(ctx, originalCNPG, cnpgCluster, "CNPGCluster"); err != nil { - return fmt.Errorf("failed to patch CNPG cluster after removing owner reference: %w", err) - } - logger.Info("Removed owner reference from CNPG Cluster") - } - // Remove owner reference from Secret to prevent its deletion. 
- if postgresCluster.Status.Resources != nil && postgresCluster.Status.Resources.SecretRef != nil { - secretName := postgresCluster.Status.Resources.SecretRef.Name - if err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: postgresCluster.Namespace}, secret); err != nil { - if !apierrors.IsNotFound(err) { - logger.Error(err, "Failed to fetch Secret during cleanup") - return fmt.Errorf("failed to fetch secret during cleanup: %w", err) - } - logger.Info("Secret not found, skipping owner reference removal", "secret", secretName) - } - if secret != nil { - originalSecret := secret.DeepCopy() - refRemoved, err := r.removeOwnerRef(postgresCluster, secret, "Secret") - if err != nil { - return fmt.Errorf("failed to remove owner reference from Secret: %w", err) - } - if refRemoved { - if err := r.patchObject(ctx, originalSecret, secret, "Secret"); err != nil { - return fmt.Errorf("failed to patch Secret after removing owner reference: %w", err) - } - } - logger.Info("Removed owner reference from Secret") - } - } - default: - logger.Info("Unknown ClusterDeletionPolicy", "policy", postgresCluster.Spec.ClusterDeletionPolicy) - } - - // Remove finalizer after successful cleanup - controllerutil.RemoveFinalizer(postgresCluster, postgresClusterFinalizerName) - if err := r.Update(ctx, postgresCluster); err != nil { - if apierrors.IsNotFound(err) { - logger.Info("PostgresCluster already deleted, skipping finalizer update") - return nil - } - logger.Error(err, "Failed to remove finalizer from PostgresCluster") - return fmt.Errorf("failed to remove finalizer: %w", err) - } - - logger.Info("Finalizer removed, cleanup complete") - return nil -} - -// clusterSecretExists returns whether the secret is present and propagates lookup errors. 
-func (r *PostgresClusterReconciler) clusterSecretExists(ctx context.Context, namespace, secretName string, secret *corev1.Secret) (clusterSecretExists bool, secretExistErr error) { - logger := logs.FromContext(ctx) - err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: namespace}, secret) - if apierrors.IsNotFound(err) { - return false, nil - } - if err != nil { - logger.Error(err, "Failed to check secret existence", "secret", secretName) - return false, err - } - logger.Info("Secret already exists", "secret", secretName) - return true, nil -} - -// removeOwnerRef removes the owner's reference from the object and reports whether it changed the object. -func (r *PostgresClusterReconciler) removeOwnerRef(owner client.Object, obj client.Object, objKind objectKind) (bool, error) { - hasOwnerRef, err := controllerutil.HasOwnerReference(obj.GetOwnerReferences(), owner, r.Scheme) - - if err != nil { - return false, fmt.Errorf("failed to check owner reference on %s: %w", objKind, err) - } - if !hasOwnerRef { - return false, nil - } - if err := controllerutil.RemoveOwnerReference(owner, obj, r.Scheme); err != nil { - return false, fmt.Errorf("failed to remove owner reference from %s: %w", objKind, err) - } - return true, nil -} - -// restoreOwnerRef adds the PostgresCluster owner reference back to an existing object when it is missing. 
-func (r *PostgresClusterReconciler) restoreOwnerRef(ctx context.Context, owner client.Object, obj client.Object, objKind objectKind) (bool, error) { - hasOwnerRef, err := controllerutil.HasOwnerReference(obj.GetOwnerReferences(), owner, r.Scheme) - if err != nil { - return false, fmt.Errorf("failed to check owner reference on %s: %w", objKind, err) - } - if hasOwnerRef { - return false, nil - } - - logger := logs.FromContext(ctx) - logger.Info("Connecting existing object to PostgresCluster by adding owner reference", "kind", objKind, "name", obj.GetName()) - - originalObj, ok := obj.DeepCopyObject().(client.Object) - if !ok { - return false, fmt.Errorf("failed to deep copy %s object", objKind) - } - - if err := ctrl.SetControllerReference(owner, obj, r.Scheme); err != nil { - return false, fmt.Errorf("failed to set controller reference on existing %s: %w", objKind, err) - } - - if err := r.patchObject(ctx, originalObj, obj, objKind); err != nil { - return false, err - } - - return true, nil -} - -// patchObject applies a merge patch and treats NotFound as already converged. -func (r *PostgresClusterReconciler) patchObject(ctx context.Context, original client.Object, obj client.Object, objKind objectKind) error { - logger := logs.FromContext(ctx) - if err := r.Patch(ctx, obj, client.MergeFrom(original)); err != nil { - if apierrors.IsNotFound(err) { - logger.Info("Object not found, skipping patch", "kind", objKind, "name", obj.GetName()) - return nil - } - return fmt.Errorf("failed to patch %s object: %w", objKind, err) - } - logger.Info("Patched object successfully", "kind", objKind, "name", obj.GetName()) - return nil -} - -// SetupWithManager registers the controller for PostgresCluster resources and owned CNPG Clusters. +// SetupWithManager registers the controller and owned resource watches. func (r *PostgresClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). 
For(&enterprisev4.PostgresCluster{}, builder.WithPredicates(postgresClusterPredicator())). @@ -1404,6 +67,9 @@ func (r *PostgresClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&corev1.Secret{}, builder.WithPredicates(secretPredicator())). Owns(&corev1.ConfigMap{}, builder.WithPredicates(configMapPredicator())). Named("postgresCluster"). + WithOptions(controller.Options{ + MaxConcurrentReconciles: ClusterTotalWorker, + }). Complete(r) } @@ -1415,44 +81,15 @@ func ownerReferencesChanged(oldObj, newObj metav1.Object) bool { return !equality.Semantic.DeepEqual(oldObj.GetOwnerReferences(), newObj.GetOwnerReferences()) } -// cnpgClusterPredicator filters CNPG Cluster events to only trigger reconciles on creation, deletion, or phase changes. -func cnpgClusterPredicator() predicate.Predicate { - - return predicate.Funcs{ - CreateFunc: func(event.CreateEvent) bool { - return true - }, - DeleteFunc: func(event.DeleteEvent) bool { - return true - }, - UpdateFunc: func(e event.UpdateEvent) bool { - oldObj, oldTypeOK := e.ObjectOld.(*cnpgv1.Cluster) - newObj, newTypeOK := e.ObjectNew.(*cnpgv1.Cluster) - if !oldTypeOK || !newTypeOK { - return true - } - return oldObj.Status.Phase != newObj.Status.Phase || - ownerReferencesChanged(oldObj, newObj) - }, - GenericFunc: func(event.GenericEvent) bool { - return false - }, - } -} - -// postgresClusterPredicator filters PostgresCluster events to trigger reconciles on creation, deletion, generation changes, deletion timestamp changes, or finalizer changes. +// postgresClusterPredicator triggers on generation changes, deletion, and finalizer transitions. 
func postgresClusterPredicator() predicate.Predicate { return predicate.Funcs{ - CreateFunc: func(event.CreateEvent) bool { - return true - }, - DeleteFunc: func(event.DeleteEvent) bool { - return true - }, + CreateFunc: func(event.CreateEvent) bool { return true }, + DeleteFunc: func(event.DeleteEvent) bool { return true }, UpdateFunc: func(e event.UpdateEvent) bool { - oldObj, oldTypeOK := e.ObjectOld.(*enterprisev4.PostgresCluster) - newObj, newTypeOK := e.ObjectNew.(*enterprisev4.PostgresCluster) - if !oldTypeOK || !newTypeOK { + oldObj, oldOK := e.ObjectOld.(*enterprisev4.PostgresCluster) + newObj, newOK := e.ObjectNew.(*enterprisev4.PostgresCluster) + if !oldOK || !newOK { return true } if oldObj.Generation != newObj.Generation { @@ -1461,75 +98,75 @@ func postgresClusterPredicator() predicate.Predicate { if deletionTimestampChanged(oldObj, newObj) { return true } - if postgresClusterFinalizerName != "" && (controllerutil.ContainsFinalizer(oldObj, postgresClusterFinalizerName) != controllerutil.ContainsFinalizer(newObj, postgresClusterFinalizerName)) { + // Finalizer changes indicate registration or deletion — always reconcile. + return controllerutil.ContainsFinalizer(oldObj, clustercore.PostgresClusterFinalizerName) != + controllerutil.ContainsFinalizer(newObj, clustercore.PostgresClusterFinalizerName) + }, + GenericFunc: func(event.GenericEvent) bool { return false }, + } +} + +// cnpgClusterPredicator reconciles on create/delete and on phase or owner reference changes.
+func cnpgClusterPredicator() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(event.CreateEvent) bool { return true }, + DeleteFunc: func(event.DeleteEvent) bool { return true }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldObj, oldOK := e.ObjectOld.(*cnpgv1.Cluster) + newObj, newOK := e.ObjectNew.(*cnpgv1.Cluster) + if !oldOK || !newOK { return true } - return false - }, - GenericFunc: func(event.GenericEvent) bool { - return false + return oldObj.Status.Phase != newObj.Status.Phase || + ownerReferencesChanged(oldObj, newObj) }, + GenericFunc: func(event.GenericEvent) bool { return false }, } } -// cnpgPoolerPredicator filters CNPG Pooler events to trigger reconciles on creation, deletion, or instance count changes. +// cnpgPoolerPredicator reconciles on create/delete and on instance count changes. func cnpgPoolerPredicator() predicate.Predicate { return predicate.Funcs{ - CreateFunc: func(event.CreateEvent) bool { - return true - }, - DeleteFunc: func(event.DeleteEvent) bool { - return true - }, + CreateFunc: func(event.CreateEvent) bool { return true }, + DeleteFunc: func(event.DeleteEvent) bool { return true }, UpdateFunc: func(e event.UpdateEvent) bool { - oldObj, oldTypeOK := e.ObjectOld.(*cnpgv1.Pooler) - newObj, newTypeOK := e.ObjectNew.(*cnpgv1.Pooler) - if !oldTypeOK || !newTypeOK { + oldObj, oldOK := e.ObjectOld.(*cnpgv1.Pooler) + newObj, newOK := e.ObjectNew.(*cnpgv1.Pooler) + if !oldOK || !newOK { return true } return oldObj.Status.Instances != newObj.Status.Instances }, - GenericFunc: func(event.GenericEvent) bool { - return false - }, + GenericFunc: func(event.GenericEvent) bool { return false }, } } -// secretPredicator filters Secret events to trigger reconciles on creation, deletion, or owner reference changes. + +// secretPredicator reconciles on create/delete and on owner reference changes.
func secretPredicator() predicate.Predicate { return predicate.Funcs{ - CreateFunc: func(event.CreateEvent) bool { - return true - }, - DeleteFunc: func(event.DeleteEvent) bool { - return true - }, + CreateFunc: func(event.CreateEvent) bool { return true }, + DeleteFunc: func(event.DeleteEvent) bool { return true }, UpdateFunc: func(e event.UpdateEvent) bool { - oldObj, oldTypeOK := e.ObjectOld.(*corev1.Secret) - newObj, newTypeOK := e.ObjectNew.(*corev1.Secret) - if !oldTypeOK || !newTypeOK { + oldObj, oldOK := e.ObjectOld.(*corev1.Secret) + newObj, newOK := e.ObjectNew.(*corev1.Secret) + if !oldOK || !newOK { return true } return ownerReferencesChanged(oldObj, newObj) }, - GenericFunc: func(event.GenericEvent) bool { - return false - }, + GenericFunc: func(event.GenericEvent) bool { return false }, } } -// configMapPredicator filters ConfigMap events to trigger reconciles on creation, deletion, data/label/annotation changes, or owner reference changes. +// configMapPredicator triggers on data, label, annotation, or owner reference changes. 
func configMapPredicator() predicate.Predicate { return predicate.Funcs{ - CreateFunc: func(event.CreateEvent) bool { - return true - }, - DeleteFunc: func(event.DeleteEvent) bool { - return true - }, + CreateFunc: func(event.CreateEvent) bool { return true }, + DeleteFunc: func(event.DeleteEvent) bool { return true }, UpdateFunc: func(e event.UpdateEvent) bool { - oldObj, oldTypeOK := e.ObjectOld.(*corev1.ConfigMap) - newObj, newTypeOK := e.ObjectNew.(*corev1.ConfigMap) - if !oldTypeOK || !newTypeOK { + oldObj, oldOK := e.ObjectOld.(*corev1.ConfigMap) + newObj, newOK := e.ObjectNew.(*corev1.ConfigMap) + if !oldOK || !newOK { return true } return !equality.Semantic.DeepEqual(oldObj.Data, newObj.Data) || @@ -1537,8 +174,6 @@ func configMapPredicator() predicate.Predicate { !equality.Semantic.DeepEqual(oldObj.Annotations, newObj.Annotations) || ownerReferencesChanged(oldObj, newObj) }, - GenericFunc: func(event.GenericEvent) bool { - return false - }, + GenericFunc: func(event.GenericEvent) bool { return false }, } } diff --git a/internal/controller/postgresdatabase_controller.go b/internal/controller/postgresdatabase_controller.go index 814d9e75c..40faa3eb3 100644 --- a/internal/controller/postgresdatabase_controller.go +++ b/internal/controller/postgresdatabase_controller.go @@ -18,65 +18,36 @@ package controller import ( "context" - "encoding/json" - stderrors "errors" - "fmt" "reflect" - "slices" - "strings" - "time" cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" - "github.com/jackc/pgx/v5" - "github.com/sethvargo/go-password/password" enterprisev4 "github.com/splunk/splunk-operator/api/v4" + dbadapter "github.com/splunk/splunk-operator/pkg/postgresql/database/adapter" + dbcore "github.com/splunk/splunk-operator/pkg/postgresql/database/core" + corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" ) -const ( - secretRoleAdmin = "admin" - secretRoleRW = "rw" - - // Password generation — no symbols for PostgreSQL connection string compatibility. - passwordLength = 32 - passwordDigits = 8 - passwordSymbols = 0 - - // Label keys used on managed secrets. - labelManagedBy = "app.kubernetes.io/managed-by" - labelCNPGReload = "cnpg.io/reload" - - // postgresPort is the standard PostgreSQL port used in all connection strings. - postgresPort = "5432" - - // fieldManagerPrefix is the SSA field manager prefix for PostgresDatabase controllers. - fieldManagerPrefix = "postgresdatabase-" -) - -// fieldManagerName returns the SSA field manager name for a given PostgresDatabase. -func fieldManagerName(postgresDBName string) string { - return fieldManagerPrefix + postgresDBName -} - -// PostgresDatabaseReconciler reconciles a PostgresDatabase object +// PostgresDatabaseReconciler reconciles a PostgresDatabase object. 
type PostgresDatabaseReconciler struct { client.Client Scheme *runtime.Scheme } +const ( + DatabaseTotalWorker int = 2 +) + //+kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresdatabases,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresdatabases/status,verbs=get;update;patch //+kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresdatabases/finalizers,verbs=update @@ -88,950 +59,20 @@ type PostgresDatabaseReconciler struct { func (r *PostgresDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { logger := log.FromContext(ctx) - logger.Info("Reconciling PostgresDatabase", "name", req.Name, "namespace", req.Namespace) postgresDB := &enterprisev4.PostgresDatabase{} if err := r.Get(ctx, req.NamespacedName, postgresDB); err != nil { - if errors.IsNotFound(err) { + if apierrors.IsNotFound(err) { logger.Info("PostgresDatabase resource not found, ignoring") return ctrl.Result{}, nil } - logger.Error(err, "Failed to get PostgresDatabase", "name", req.Name) return ctrl.Result{}, err } - logger.Info("PostgresDatabase CR Fetched successfully", "generation", postgresDB.Generation) - - // Closure captures postgresDB so call sites don't repeat it on every status update. 
- updateStatus := func(conditionType conditionTypes, conditionStatus metav1.ConditionStatus, reason conditionReasons, message string, phase reconcileDBPhases) error { - return r.updateStatus(ctx, postgresDB, conditionType, conditionStatus, reason, message, phase) - } - - // Handle finalizer: cleanup on deletion, register on creation - if postgresDB.GetDeletionTimestamp() != nil { - if err := r.handleDeletion(ctx, postgresDB); err != nil { - logger.Error(err, "Cleanup failed for PostgresDatabase") - return ctrl.Result{}, err - } - return ctrl.Result{}, nil - } - if !controllerutil.ContainsFinalizer(postgresDB, postgresDatabaseFinalizerName) { - controllerutil.AddFinalizer(postgresDB, postgresDatabaseFinalizerName) - if err := r.Update(ctx, postgresDB); err != nil { - logger.Error(err, "Failed to add finalizer to PostgresDatabase") - return ctrl.Result{}, err - } - return ctrl.Result{}, nil - } - - // ObservedGeneration is only written when all phases complete successfully, - // so equality means nothing changed and there is no pending work. 
- if postgresDB.Status.ObservedGeneration == postgresDB.Generation { - logger.Info("Spec unchanged and all phases complete, skipping") - return ctrl.Result{}, nil - } - logger.Info("Changes to resource detected, reconciling...") - - // Phase: ClusterValidation - var cluster *enterprisev4.PostgresCluster - var clusterStatus clusterReadyStatus - var err error - - cluster, clusterStatus, err = r.ensureClusterReady(ctx, postgresDB) - if err != nil { - if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterInfoFetchFailed, "Can't reach Cluster CR due to transient errors", pendingDBPhase); statusErr != nil { - logger.Error(statusErr, "Failed to update status") - } - return ctrl.Result{}, err - } - logger.Info("Cluster validation done", "clusterName", postgresDB.Spec.ClusterRef.Name, "status", clusterStatus) - - switch clusterStatus { - case ClusterNotFound: - if err := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterNotFound, "Cluster CR not found", pendingDBPhase); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{RequeueAfter: clusterNotFoundRetryDelay}, nil - - case ClusterNotReady, ClusterNoProvisionerRef: - if err := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterProvisioning, "Cluster is not in ready state yet", pendingDBPhase); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{RequeueAfter: retryDelay}, nil - - case ClusterReady: - if err := updateStatus(clusterReady, metav1.ConditionTrue, reasonClusterAvailable, "Cluster is operational", provisioningDBPhase); err != nil { - return ctrl.Result{}, err - } - } - - // Phase: RoleConflictCheck — before creating any resources, verify no other - // field manager already owns the same roles via SSA. - roleConflicts := getRoleConflicts(postgresDB, cluster) - if len(roleConflicts) > 0 { - conflictMsg := fmt.Sprintf("Role conflict: %s. 
"+ - "If you deleted a previous PostgresDatabase, recreate it with the original name to re-adopt the orphaned resources.", - strings.Join(roleConflicts, ", ")) - logger.Error(nil, conflictMsg) - if statusErr := updateStatus(rolesReady, metav1.ConditionFalse, reasonRoleConflict, conflictMsg, failedDBPhase); statusErr != nil { - logger.Error(statusErr, "Failed to update status") - } - return ctrl.Result{}, nil - } - - // We need the CNPG Cluster directly because PostgresCluster status does not yet - // surface managed role reconciliation state — tracked as a future abstraction improvement. - cnpgCluster := &cnpgv1.Cluster{} - if err := r.Get(ctx, types.NamespacedName{ - Name: cluster.Status.ProvisionerRef.Name, - Namespace: cluster.Status.ProvisionerRef.Namespace, - }, cnpgCluster); err != nil { - logger.Error(err, "Failed to fetch CNPG Cluster") - return ctrl.Result{}, err - } - - // Phase: CredentialProvisioning — secrets must exist before roles are patched, - // CNPG rejects a PasswordSecretRef pointing at a missing secret. - if err := r.reconcileUserSecrets(ctx, postgresDB); err != nil { - if statusErr := updateStatus(secretsReady, metav1.ConditionFalse, reasonSecretsCreationFailed, - fmt.Sprintf("Failed to reconcile user secrets: %v", err), provisioningDBPhase); statusErr != nil { - logger.Error(statusErr, "Failed to update status") - } - return ctrl.Result{}, err - } - if err := updateStatus(secretsReady, metav1.ConditionTrue, reasonSecretsCreated, - fmt.Sprintf("All secrets provisioned for %d databases", len(postgresDB.Spec.Databases)), provisioningDBPhase); err != nil { - return ctrl.Result{}, err - } - - // Phase: ConnectionMetadata — ConfigMaps carry connection info consumers need as soon as - // databases are ready, so they are created alongside secrets before any role or database work begins. 
- endpoints := resolveClusterEndpoints(cluster, cnpgCluster, postgresDB.Namespace) - if err := r.reconcileRoleConfigMaps(ctx, postgresDB, endpoints); err != nil { - if statusErr := updateStatus(configMapsReady, metav1.ConditionFalse, reasonConfigMapsCreationFailed, - fmt.Sprintf("Failed to reconcile ConfigMaps: %v", err), provisioningDBPhase); statusErr != nil { - logger.Error(statusErr, "Failed to update status") - } - return ctrl.Result{}, err - } - if err := updateStatus(configMapsReady, metav1.ConditionTrue, reasonConfigMapsCreated, - fmt.Sprintf("All ConfigMaps provisioned for %d databases", len(postgresDB.Spec.Databases)), provisioningDBPhase); err != nil { - return ctrl.Result{}, err - } - - // Phase: RoleProvisioning - desiredUsers := getDesiredUsers(postgresDB) - actualRolesInSpec := getUsersInClusterSpec(cluster) - var missingRolesFromSpec []string - for _, role := range desiredUsers { - if !slices.Contains(actualRolesInSpec, role) { - missingRolesFromSpec = append(missingRolesFromSpec, role) - } - } - - if len(missingRolesFromSpec) > 0 { - logger.Info("User spec changed, patching CNPG Cluster", "missing", missingRolesFromSpec) - if err := r.patchManagedRoles(ctx, postgresDB, cluster); err != nil { - logger.Error(err, "Failed to patch users in CNPG Cluster") - return ctrl.Result{}, err - } - // Spec updated, requeue to check status - if err := updateStatus(rolesReady, metav1.ConditionFalse, reasonWaitingForCNPG, fmt.Sprintf("Waiting for %d roles to be reconciled", len(desiredUsers)), provisioningDBPhase); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{RequeueAfter: retryDelay}, nil - } - - notReadyRoles, err := r.verifyRolesReady(ctx, desiredUsers, cnpgCluster) - if err != nil { - if statusErr := updateStatus(rolesReady, metav1.ConditionFalse, reasonUsersCreationFailed, fmt.Sprintf("Role creation failed: %v", err), failedDBPhase); statusErr != nil { - logger.Error(statusErr, "Failed to update status") - } - return ctrl.Result{}, err - } 
- - if len(notReadyRoles) > 0 { - if err := updateStatus(rolesReady, metav1.ConditionFalse, reasonWaitingForCNPG, fmt.Sprintf("Waiting for roles to be reconciled: %v", notReadyRoles), provisioningDBPhase); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{RequeueAfter: retryDelay}, nil - } - - // All users present in spec and reconciled in status - if err := updateStatus(rolesReady, metav1.ConditionTrue, reasonUsersAvailable, fmt.Sprintf("All %d users in PostgreSQL", len(desiredUsers)), provisioningDBPhase); err != nil { - return ctrl.Result{}, err - } - - // Phase: DatabaseProvisioning - if err := r.reconcileCNPGDatabases(ctx, postgresDB, cluster); err != nil { - logger.Error(err, "Failed to reconcile CNPG Databases") - return ctrl.Result{}, err - } - - notReadyDatabases, err := r.verifyDatabasesReady(ctx, postgresDB) - if err != nil { - logger.Error(err, "Failed to verify database status") - return ctrl.Result{}, err - } - - if len(notReadyDatabases) > 0 { - if err := updateStatus(databasesReady, metav1.ConditionFalse, reasonWaitingForCNPG, - fmt.Sprintf("Waiting for databases to be ready: %v", notReadyDatabases), provisioningDBPhase); err != nil { - return ctrl.Result{}, err - } - return ctrl.Result{RequeueAfter: retryDelay}, nil - } - if err := updateStatus(databasesReady, metav1.ConditionTrue, reasonDatabasesAvailable, fmt.Sprintf("All %d databases ready", len(postgresDB.Spec.Databases)), readyDBPhase); err != nil { - return ctrl.Result{}, err - } - - // Phase: RWRolePrivileges - // Skipped when no new databases are detected — ALTER DEFAULT PRIVILEGES covers tables - // added by migrations on existing databases. Re-runs for all databases when a new one - // is added (idempotent for existing ones, required for the new one). 
- if hasNewDatabases(postgresDB) { - if cluster.Status.Resources == nil || cluster.Status.Resources.SecretRef == nil { - return ctrl.Result{}, fmt.Errorf("PostgresCluster %s has no superuser secret in status yet", cluster.Name) - } - superSecret := &corev1.Secret{} - if err := r.Get(ctx, types.NamespacedName{ - Name: cluster.Status.Resources.SecretRef.Name, - Namespace: postgresDB.Namespace, - }, superSecret); err != nil { - return ctrl.Result{}, fmt.Errorf("fetching superuser secret %s: %w", cluster.Status.Resources.SecretRef.Name, err) - } - - dbNames := make([]string, 0, len(postgresDB.Spec.Databases)) - for _, dbSpec := range postgresDB.Spec.Databases { - dbNames = append(dbNames, dbSpec.Name) - } - - password, ok := superSecret.Data["password"] - if !ok { - return ctrl.Result{}, fmt.Errorf("superuser secret %s missing 'password' key", cluster.Status.Resources.SecretRef.Name) - } - - if err := reconcileRWRolePrivileges(ctx, endpoints.RWHost, string(password), dbNames); err != nil { - if statusErr := updateStatus(privilegesReady, metav1.ConditionFalse, reasonPrivilegesGrantFailed, - fmt.Sprintf("Failed to grant RW role privileges: %v", err), provisioningDBPhase); statusErr != nil { - logger.Error(statusErr, "Failed to update status") - } - return ctrl.Result{}, err - } - if err := updateStatus(privilegesReady, metav1.ConditionTrue, reasonPrivilegesGranted, - fmt.Sprintf("RW role privileges granted for all %d databases", len(postgresDB.Spec.Databases)), readyDBPhase); err != nil { - return ctrl.Result{}, err - } - } - - postgresDB.Status.Databases = populateDatabaseStatus(postgresDB) - postgresDB.Status.ObservedGeneration = postgresDB.Generation - - logger.Info("All phases complete") - return ctrl.Result{}, nil -} - -// ensureClusterReady checks if the referenced PostgresCluster exists and is ready -// Returns: cluster (if found), status, error (API errors only) -func (r *PostgresDatabaseReconciler) ensureClusterReady( - ctx context.Context, - postgresDB 
*enterprisev4.PostgresDatabase, -) (*enterprisev4.PostgresCluster, clusterReadyStatus, error) { - logger := log.FromContext(ctx) - - cluster := &enterprisev4.PostgresCluster{} - if err := r.Get(ctx, types.NamespacedName{Name: postgresDB.Spec.ClusterRef.Name, Namespace: postgresDB.Namespace}, cluster); err != nil { - if errors.IsNotFound(err) { - return nil, ClusterNotFound, nil - } - logger.Error(err, "Failed to fetch Cluster", "name", postgresDB.Spec.ClusterRef.Name) - return nil, ClusterNotReady, err - } - - if cluster.Status.Phase != string(ClusterReady) { - logger.Info("Cluster not ready", "status", cluster.Status.Phase) - return cluster, ClusterNotReady, nil - } - - if cluster.Status.ProvisionerRef == nil { - logger.Info("Cluster has no ProvisionerRef yet", "cluster", cluster.Name) - return cluster, ClusterNoProvisionerRef, nil - } - - return cluster, ClusterReady, nil -} - -// getDesiredUsers builds the list of users we want to create for this PostgresDatabase -func getDesiredUsers(postgresDB *enterprisev4.PostgresDatabase) []string { - users := make([]string, 0, len(postgresDB.Spec.Databases)*2) - for _, dbSpec := range postgresDB.Spec.Databases { - users = append(users, adminRoleName(dbSpec.Name), rwRoleName(dbSpec.Name)) - } - return users -} - -// getUsersInClusterSpec checks our PostgresCluster CR rather than the CNPG Cluster -// because the database controller owns PostgresCluster.spec.managedRoles via SSA — -// CNPG may have roles from other sources that we must not treat as our own. -// Name-only comparison is sufficient: PasswordSecretRef is always set in the same -// reconcile that creates the role, so a role present by name already carries the correct ref. 
-func getUsersInClusterSpec(cluster *enterprisev4.PostgresCluster) []string { - users := make([]string, 0, len(cluster.Spec.ManagedRoles)) - for _, role := range cluster.Spec.ManagedRoles { - users = append(users, role.Name) - } - return users -} - -// getRoleConflicts checks ManagedFields on the PostgresCluster to detect if any roles -// this PostgresDatabase wants to own are already claimed by a different SSA field manager. -func getRoleConflicts(postgresDB *enterprisev4.PostgresDatabase, cluster *enterprisev4.PostgresCluster) []string { - myManager := fieldManagerName(postgresDB.Name) - - desired := make(map[string]struct{}, len(postgresDB.Spec.Databases)*2) - for _, dbSpec := range postgresDB.Spec.Databases { - desired[adminRoleName(dbSpec.Name)] = struct{}{} - desired[rwRoleName(dbSpec.Name)] = struct{}{} - } - - roleOwners := managedRoleOwners(cluster.ManagedFields) - - var conflicts []string - for roleName := range desired { - owner, exists := roleOwners[roleName] - if exists && owner != myManager { - conflicts = append(conflicts, fmt.Sprintf("%s (owned by %s)", roleName, owner)) - } - } - return conflicts -} - -// managedRoleOwners builds a map of role name → field manager from ManagedFields. -func managedRoleOwners(managedFields []metav1.ManagedFieldsEntry) map[string]string { - owners := make(map[string]string) - for _, mf := range managedFields { - if mf.FieldsV1 == nil { - continue - } - for _, name := range parseRoleNames(mf.FieldsV1.Raw) { - owners[name] = mf.Manager - } - } - return owners -} - -// parseRoleNames extracts role names from FieldsV1 JSON by walking -// f:spec → f:managedRoles → k:{"name":""}. 
-func parseRoleNames(raw []byte) []string { - var fields map[string]any - if err := json.Unmarshal(raw, &fields); err != nil { - return nil - } - - spec, _ := fields["f:spec"].(map[string]any) - roles, _ := spec["f:managedRoles"].(map[string]any) - - var names []string - for key := range roles { - var k struct{ Name string } - if err := json.Unmarshal([]byte(strings.TrimPrefix(key, "k:")), &k); err == nil && k.Name != "" { - names = append(names, k.Name) - } - } - return names -} - -// patchManagedRoles patches PostgresCluster.spec.managedRoles via SSA using an unstructured patch. -// Using unstructured avoids the zero-value problem: typed Go structs serialize required fields -// (e.g. spec.class) as "" even when unset, causing SSA to claim ownership and conflict. -// An unstructured map contains ONLY the keys we explicitly set — nothing else leaks. -// PostgresCluster controller will then diff and reconcile these roles to CNPG Cluster. -func (r *PostgresDatabaseReconciler) patchManagedRoles( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - cluster *enterprisev4.PostgresCluster, -) error { - logger := log.FromContext(ctx) - - // Build roles — name, ensure, and PasswordSecretRef pointing to the pre-created secrets. - // Secrets are guaranteed to exist at this point because Phase 2a (reconcileUserSecrets) - // runs before patchManagedRoles in the reconciliation loop. 
- allRoles := make([]enterprisev4.ManagedRole, 0, len(postgresDB.Spec.Databases)*2) - for _, dbSpec := range postgresDB.Spec.Databases { - allRoles = append(allRoles, - enterprisev4.ManagedRole{ - Name: adminRoleName(dbSpec.Name), - Ensure: "present", - PasswordSecretRef: &corev1.LocalObjectReference{Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleAdmin)}, - }, - enterprisev4.ManagedRole{ - Name: rwRoleName(dbSpec.Name), - Ensure: "present", - PasswordSecretRef: &corev1.LocalObjectReference{Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleRW)}, - }) - } - - // Construct a minimal unstructured patch — only spec.managedRoles is present. - // No other spec fields (class, storage, instances...) are included, so SSA - // will only claim ownership of the roles we explicitly list. - rolePatch := &unstructured.Unstructured{ - Object: map[string]any{ - "apiVersion": cluster.APIVersion, - "kind": cluster.Kind, - "metadata": map[string]any{ - "name": cluster.Name, - "namespace": cluster.Namespace, - }, - "spec": map[string]any{ - "managedRoles": allRoles, - }, - }, - } - - fieldManager := fieldManagerName(postgresDB.Name) - if err := r.Patch(ctx, rolePatch, client.Apply, client.FieldOwner(fieldManager)); err != nil { - logger.Error(err, "Failed to add users to PostgresCluster", "postgresDatabase", postgresDB.Name) - return fmt.Errorf("failed to patch managed roles for PostgresDatabase %s: %w", postgresDB.Name, err) - } - logger.Info("Users added to PostgresCluster via SSA", "postgresDatabase", postgresDB.Name, "postgresCluster", cluster.Name, "roleCount", len(allRoles)) - - return nil -} - -// verifyRolesReady checks if CNPG has finished creating the users. 
-func (r *PostgresDatabaseReconciler) verifyRolesReady( - ctx context.Context, - expectedUsers []string, - cnpgCluster *cnpgv1.Cluster, -) ([]string, error) { - logger := log.FromContext(ctx) - - if cnpgCluster.Status.ManagedRolesStatus.CannotReconcile != nil { - for _, userName := range expectedUsers { - if errs, exists := cnpgCluster.Status.ManagedRolesStatus.CannotReconcile[userName]; exists { - logger.Error(nil, "User reconciliation failed permanently", "user", userName, "errors", errs) - return nil, fmt.Errorf("user %s reconciliation failed: %v", userName, errs) - } - } - } - - reconciledUsers := cnpgCluster.Status.ManagedRolesStatus.ByStatus[cnpgv1.RoleStatusReconciled] - var notReady []string - for _, userName := range expectedUsers { - if !slices.Contains(reconciledUsers, userName) { - notReady = append(notReady, userName) - } - } - - if len(notReady) > 0 { - logger.Info("Users not reconciled yet", "pending", notReady) - } else { - logger.Info("All users reconciled") - } - return notReady, nil -} - -// reconcileCNPGDatabases creates or updates CNPG Database CRs for each database in the spec. -func (r *PostgresDatabaseReconciler) reconcileCNPGDatabases( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - cluster *enterprisev4.PostgresCluster, -) error { - logger := log.FromContext(ctx) - - for _, dbSpec := range postgresDB.Spec.Databases { - logger.Info("Processing database", "database", dbSpec.Name) - - cnpgDBName := cnpgDatabaseName(postgresDB.Name, dbSpec.Name) - - // reclaimPolicy controls whether CNPG physically drops the PostgreSQL database - // when the CR is deleted — a destructive and irreversible operation. 
- reclaimPolicy := cnpgv1.DatabaseReclaimDelete - if dbSpec.DeletionPolicy == deletionPolicyRetain { - reclaimPolicy = cnpgv1.DatabaseReclaimRetain - } - - cnpgDB := &cnpgv1.Database{ - ObjectMeta: metav1.ObjectMeta{ - Name: cnpgDBName, - Namespace: postgresDB.Namespace, - }, - } - _, err := controllerutil.CreateOrUpdate(ctx, r.Client, cnpgDB, func() error { - - spec := cnpgv1.DatabaseSpec{ - Name: dbSpec.Name, - Owner: adminRoleName(dbSpec.Name), - ClusterRef: corev1.LocalObjectReference{ - Name: cluster.Status.ProvisionerRef.Name, - }, - ReclaimPolicy: reclaimPolicy, - } - cnpgDB.Spec = spec - - reAdopting := cnpgDB.Annotations[annotationRetainedFrom] == postgresDB.Name - if reAdopting { - logger.Info("Re-adopting orphaned CNPG Database", "name", cnpgDBName) - delete(cnpgDB.Annotations, annotationRetainedFrom) - } - // Set ownerRef on creation or re-adoption - if cnpgDB.CreationTimestamp.IsZero() || reAdopting { - if err := controllerutil.SetControllerReference(postgresDB, cnpgDB, r.Scheme); err != nil { - logger.Error(err, "Failed to set owner reference") - return err - } - } - return nil - }) - if err != nil { - logger.Error(err, "Failed to create CNPG Database", "name", cnpgDBName) - return fmt.Errorf("failed to create CNPG Database %s: %w", cnpgDBName, err) - } - logger.Info("CNPG Database created/updated successfully", "database", dbSpec.Name) - } - return nil -} - -// verifyDatabasesReady checks if CNPG has finished provisioning the databases. -// All databases are checked before returning so the caller gets a complete picture, -// consistent with verifyRolesReady. 
-func (r *PostgresDatabaseReconciler) verifyDatabasesReady( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, -) ([]string, error) { - logger := log.FromContext(ctx) - - var notReady []string - for _, dbSpec := range postgresDB.Spec.Databases { - cnpgDBName := cnpgDatabaseName(postgresDB.Name, dbSpec.Name) - - cnpgDB := &cnpgv1.Database{} - if err := r.Get(ctx, types.NamespacedName{ - Name: cnpgDBName, - Namespace: postgresDB.Namespace, - }, cnpgDB); err != nil { - logger.Error(err, "Failed to get CNPG Database status", "database", dbSpec.Name) - return nil, fmt.Errorf("failed to get CNPG Database %s: %w", cnpgDBName, err) - } - - if cnpgDB.Status.Applied == nil || !*cnpgDB.Status.Applied { - notReady = append(notReady, dbSpec.Name) - } - } - return notReady, nil -} - -// updateStatus sets a condition and phase on the PostgresDatabase status in a single write — -// callers should not call r.Status().Update() directly. -func (r *PostgresDatabaseReconciler) updateStatus( - ctx context.Context, - db *enterprisev4.PostgresDatabase, - conditionType conditionTypes, - conditionStatus metav1.ConditionStatus, - reason conditionReasons, - message string, - phase reconcileDBPhases, -) error { - meta.SetStatusCondition(&db.Status.Conditions, metav1.Condition{ - Type: string(conditionType), - Status: conditionStatus, - Reason: string(reason), - Message: message, - ObservedGeneration: db.Generation, - }) - db.Status.Phase = string(phase) - return r.Status().Update(ctx, db) -} - -// deletionPlan separates databases by their DeletionPolicy for the cleanup workflow. -type deletionPlan struct { - retained []enterprisev4.DatabaseDefinition - deleted []enterprisev4.DatabaseDefinition -} - -// buildDeletionPlan splits databases into retained and deleted groups. 
-func buildDeletionPlan(databases []enterprisev4.DatabaseDefinition) deletionPlan { - var plan deletionPlan - for _, db := range databases { - if db.DeletionPolicy == deletionPolicyRetain { - plan.retained = append(plan.retained, db) - } else { - plan.deleted = append(plan.deleted, db) - } - } - return plan -} - -// handleDeletion orchestrates the cleanup workflow for a PostgresDatabase being deleted. -func (r *PostgresDatabaseReconciler) handleDeletion(ctx context.Context, postgresDB *enterprisev4.PostgresDatabase) error { - plan := buildDeletionPlan(postgresDB.Spec.Databases) - - if err := r.orphanRetainedResources(ctx, postgresDB, plan.retained); err != nil { - return err - } - if err := r.deleteRemovedResources(ctx, postgresDB, plan.deleted); err != nil { - return err - } - if err := r.cleanupManagedRoles(ctx, postgresDB, plan); err != nil { - return err - } - - controllerutil.RemoveFinalizer(postgresDB, postgresDatabaseFinalizerName) - if err := r.Update(ctx, postgresDB); err != nil { - if errors.IsNotFound(err) { - return nil - } - return fmt.Errorf("failed to remove finalizer: %w", err) - } - - log.FromContext(ctx).Info("Cleanup complete for PostgresDatabase", - "name", postgresDB.Name, - "retained", len(plan.retained), - "deleted", len(plan.deleted)) - return nil -} - -// orphanRetainedResources strips ownerRefs and adds retention annotations -// so resources survive the parent's deletion and can be re-adopted later. 
-func (r *PostgresDatabaseReconciler) orphanRetainedResources( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - retained []enterprisev4.DatabaseDefinition, -) error { - if err := r.orphanCNPGDatabases(ctx, postgresDB, retained); err != nil { - return err - } - if err := r.orphanConfigMaps(ctx, postgresDB, retained); err != nil { - return err - } - if err := r.orphanSecrets(ctx, postgresDB, retained); err != nil { - return err - } - return nil -} - -// deleteRemovedResources deletes CNPG Databases, ConfigMaps, and Secrets -// for databases with the Delete policy. -func (r *PostgresDatabaseReconciler) deleteRemovedResources( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - deleted []enterprisev4.DatabaseDefinition, -) error { - if err := r.deleteCNPGDatabases(ctx, postgresDB, deleted); err != nil { - return err - } - if err := r.deleteConfigMaps(ctx, postgresDB, deleted); err != nil { - return err - } - if err := r.deleteSecrets(ctx, postgresDB, deleted); err != nil { - return err - } - return nil -} - -// cleanupManagedRoles releases SSA ownership of deleted databases' roles. -// When all databases are retained, roles stay as-is under our field manager. 
-func (r *PostgresDatabaseReconciler) cleanupManagedRoles( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - plan deletionPlan, -) error { - if len(plan.deleted) == 0 { - return nil - } - cluster := &enterprisev4.PostgresCluster{} - if err := r.Get(ctx, types.NamespacedName{Name: postgresDB.Spec.ClusterRef.Name, Namespace: postgresDB.Namespace}, cluster); err != nil { - if !errors.IsNotFound(err) { - return fmt.Errorf("failed to get PostgresCluster for role cleanup: %w", err) - } - log.FromContext(ctx).Info("PostgresCluster already deleted, skipping role cleanup") - return nil - } - return r.patchManagedRolesOnDeletion(ctx, postgresDB, cluster, plan.retained) -} - -// orphanCNPGDatabases strips ownerReferences and adds a retention annotation -// on CNPG Database CRs for the given databases. -func (r *PostgresDatabaseReconciler) orphanCNPGDatabases( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - databases []enterprisev4.DatabaseDefinition, -) error { - logger := log.FromContext(ctx) - - for _, dbSpec := range databases { - cnpgDBName := cnpgDatabaseName(postgresDB.Name, dbSpec.Name) - db := &cnpgv1.Database{} - if err := r.Get(ctx, types.NamespacedName{Name: cnpgDBName, Namespace: postgresDB.Namespace}, db); err != nil { - if errors.IsNotFound(err) { - continue - } - return fmt.Errorf("failed to get CNPG Database %s for orphaning: %w", cnpgDBName, err) - } - if db.Annotations[annotationRetainedFrom] == postgresDB.Name { - continue - } - stripOwnerReference(db, postgresDB.UID) - if db.Annotations == nil { - db.Annotations = make(map[string]string) - } - db.Annotations[annotationRetainedFrom] = postgresDB.Name - if err := r.Update(ctx, db); err != nil { - return fmt.Errorf("failed to orphan CNPG Database %s: %w", cnpgDBName, err) - } - logger.Info("Orphaned CNPG Database CR", "name", cnpgDBName) - } - return nil -} - -// orphanConfigMaps strips ownerReferences and adds a retention annotation -// on ConfigMaps for the given 
databases. -func (r *PostgresDatabaseReconciler) orphanConfigMaps( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - databases []enterprisev4.DatabaseDefinition, -) error { - logger := log.FromContext(ctx) - - for _, dbSpec := range databases { - cmName := configMapName(postgresDB.Name, dbSpec.Name) - cm := &corev1.ConfigMap{} - if err := r.Get(ctx, types.NamespacedName{Name: cmName, Namespace: postgresDB.Namespace}, cm); err != nil { - if errors.IsNotFound(err) { - continue - } - return fmt.Errorf("failed to get ConfigMap %s for orphaning: %w", cmName, err) - } - if cm.Annotations[annotationRetainedFrom] == postgresDB.Name { - continue - } - stripOwnerReference(cm, postgresDB.UID) - if cm.Annotations == nil { - cm.Annotations = make(map[string]string) - } - cm.Annotations[annotationRetainedFrom] = postgresDB.Name - if err := r.Update(ctx, cm); err != nil { - return fmt.Errorf("failed to orphan ConfigMap %s: %w", cmName, err) - } - logger.Info("Orphaned ConfigMap", "name", cmName) - } - return nil -} - -// orphanSecrets strips ownerReferences and adds a retention annotation -// on Secrets for the given databases. 
-func (r *PostgresDatabaseReconciler) orphanSecrets( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - databases []enterprisev4.DatabaseDefinition, -) error { - logger := log.FromContext(ctx) - - for _, dbSpec := range databases { - for _, role := range []string{secretRoleAdmin, secretRoleRW} { - secretName := roleSecretName(postgresDB.Name, dbSpec.Name, role) - secret := &corev1.Secret{} - if err := r.Get(ctx, types.NamespacedName{Name: secretName, Namespace: postgresDB.Namespace}, secret); err != nil { - if errors.IsNotFound(err) { - continue - } - return fmt.Errorf("failed to get Secret %s for orphaning: %w", secretName, err) - } - if secret.Annotations[annotationRetainedFrom] == postgresDB.Name { - continue - } - stripOwnerReference(secret, postgresDB.UID) - if secret.Annotations == nil { - secret.Annotations = make(map[string]string) - } - secret.Annotations[annotationRetainedFrom] = postgresDB.Name - if err := r.Update(ctx, secret); err != nil { - return fmt.Errorf("failed to orphan Secret %s: %w", secretName, err) - } - logger.Info("Orphaned Secret", "name", secretName) - } - } - return nil -} - -// deleteCNPGDatabases explicitly deletes CNPG Database CRs for the given databases. 
-func (r *PostgresDatabaseReconciler) deleteCNPGDatabases( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - databases []enterprisev4.DatabaseDefinition, -) error { - logger := log.FromContext(ctx) - - for _, dbSpec := range databases { - cnpgDBName := cnpgDatabaseName(postgresDB.Name, dbSpec.Name) - db := &cnpgv1.Database{ - ObjectMeta: metav1.ObjectMeta{ - Name: cnpgDBName, - Namespace: postgresDB.Namespace, - }, - } - if err := r.Delete(ctx, db); err != nil { - if errors.IsNotFound(err) { - logger.Info("CNPG Database already deleted", "name", cnpgDBName) - continue - } - return fmt.Errorf("failed to delete CNPG Database %s: %w", cnpgDBName, err) - } - logger.Info("Deleted CNPG Database CR", "name", cnpgDBName) - } - return nil -} - -// deleteConfigMaps explicitly deletes ConfigMaps for the given databases. -func (r *PostgresDatabaseReconciler) deleteConfigMaps( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - databases []enterprisev4.DatabaseDefinition, -) error { - logger := log.FromContext(ctx) - - for _, dbSpec := range databases { - cmName := configMapName(postgresDB.Name, dbSpec.Name) - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: cmName, - Namespace: postgresDB.Namespace, - }, - } - if err := r.Delete(ctx, cm); err != nil { - if errors.IsNotFound(err) { - logger.Info("ConfigMap already deleted", "name", cmName) - continue - } - return fmt.Errorf("failed to delete ConfigMap %s: %w", cmName, err) - } - logger.Info("Deleted ConfigMap", "name", cmName) - } - return nil -} - -// deleteSecrets explicitly deletes Secrets for the given databases. 
-func (r *PostgresDatabaseReconciler) deleteSecrets( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - databases []enterprisev4.DatabaseDefinition, -) error { - logger := log.FromContext(ctx) - - for _, dbSpec := range databases { - for _, role := range []string{secretRoleAdmin, secretRoleRW} { - secretName := roleSecretName(postgresDB.Name, dbSpec.Name, role) - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: postgresDB.Namespace, - }, - } - if err := r.Delete(ctx, secret); err != nil { - if errors.IsNotFound(err) { - logger.Info("Secret already deleted", "name", secretName) - continue - } - return fmt.Errorf("failed to delete Secret %s: %w", secretName, err) - } - logger.Info("Deleted Secret", "name", secretName) - } - } - return nil -} - -// buildRetainedRoles returns the SSA role list for databases that are being retained. -// Returns an empty slice when no databases are retained, which clears our field manager's claim. -func buildRetainedRoles(postgresDBName string, retainedDBs []enterprisev4.DatabaseDefinition) []enterprisev4.ManagedRole { - roles := make([]enterprisev4.ManagedRole, 0, len(retainedDBs)*2) - for _, dbSpec := range retainedDBs { - roles = append(roles, - enterprisev4.ManagedRole{ - Name: adminRoleName(dbSpec.Name), - Ensure: "present", - PasswordSecretRef: &corev1.LocalObjectReference{Name: roleSecretName(postgresDBName, dbSpec.Name, secretRoleAdmin)}, - }, - enterprisev4.ManagedRole{ - Name: rwRoleName(dbSpec.Name), - Ensure: "present", - PasswordSecretRef: &corev1.LocalObjectReference{Name: roleSecretName(postgresDBName, dbSpec.Name, secretRoleRW)}, - }, - ) - } - return roles -} - -// patchManagedRolesOnDeletion applies an SSA patch to keep only retained databases' roles. -// -// SSA ensures that each patch only affects fields owned by our field manager -// (postgresdatabase-). 
This means when one PostgresDatabase is deleted, -// its role cleanup cannot interfere with roles managed by other PostgresDatabase -// controllers targeting the same PostgresCluster. -func (r *PostgresDatabaseReconciler) patchManagedRolesOnDeletion( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - cluster *enterprisev4.PostgresCluster, - retainedDBs []enterprisev4.DatabaseDefinition, -) error { - roles := buildRetainedRoles(postgresDB.Name, retainedDBs) - - rolePatch := &unstructured.Unstructured{ - Object: map[string]any{ - "apiVersion": cluster.APIVersion, - "kind": cluster.Kind, - "metadata": map[string]any{ - "name": cluster.Name, - "namespace": cluster.Namespace, - }, - "spec": map[string]any{ - "managedRoles": roles, - }, - }, - } - - fieldManager := fieldManagerName(postgresDB.Name) - if err := r.Patch(ctx, rolePatch, client.Apply, client.FieldOwner(fieldManager)); err != nil { - return fmt.Errorf("failed to patch managed roles on deletion: %w", err) - } - - log.FromContext(ctx).Info("Patched managed roles on deletion", - "postgresDatabase", postgresDB.Name, - "retainedRoles", len(roles)) - return nil -} - -// stripOwnerReference removes only the ownerReference matching the given UID from obj, -// preserving any other owner references the object may have. -func stripOwnerReference(obj metav1.Object, ownerUID types.UID) { - refs := obj.GetOwnerReferences() - filtered := make([]metav1.OwnerReference, 0, len(refs)) - for _, ref := range refs { - if ref.UID != ownerUID { - filtered = append(filtered, ref) - } - } - obj.SetOwnerReferences(filtered) -} - -// adoptResource removes the retention annotation, restores the controller ownerRef, -// and updates the object. Works for any resource type (Secret, ConfigMap, CNPG Database). 
-func (r *PostgresDatabaseReconciler) adoptResource( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - obj client.Object, -) error { - annotations := obj.GetAnnotations() - delete(annotations, annotationRetainedFrom) - obj.SetAnnotations(annotations) - if err := controllerutil.SetControllerReference(postgresDB, obj, r.Scheme); err != nil { - return err - } - return r.Update(ctx, obj) + return dbcore.PostgresDatabaseService(ctx, r.Client, r.Scheme, postgresDB, dbadapter.NewDBRepository) } // SetupWithManager sets up the controller with the Manager. func (r *PostgresDatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error { - - // Index CNPG Databases by controller owner so getDatabasesInCNPGSpec can filter by owner name without a full list scan. if err := mgr.GetFieldIndexer().IndexField( context.Background(), &cnpgv1.Database{}, @@ -1067,374 +108,8 @@ func (r *PostgresDatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&corev1.Secret{}). Owns(&corev1.ConfigMap{}). Named("postgresdatabase"). + WithOptions(controller.Options{ + MaxConcurrentReconciles: DatabaseTotalWorker, + }). Complete(r) } - -// DBRepo abstracts SQL execution so grant logic is testable without a live cluster. -// Connection lifecycle is managed internally — callers only call ExecGrants. -type DBRepo interface { - ExecGrants(ctx context.Context, dbName string) error -} - -type dbRepo struct { - conn *pgx.Conn -} - -// newDBRepo opens a direct superuser connection, bypassing any pooler. -// PgBouncer in transaction mode blocks DDL; password set on config avoids URL-encoding issues. 
-func newDBRepo(ctx context.Context, host, dbName, password string) (DBRepo, error) { - cfg, err := pgx.ParseConfig(fmt.Sprintf( - "postgres://%s@%s:%s/%s?sslmode=require&connect_timeout=%d", - superUsername, host, postgresPort, dbName, - int(dbConnectTimeout.Seconds()), - )) - if err != nil { - return nil, fmt.Errorf("parsing connection config for %s/%s: %w", host, dbName, err) - } - cfg.Password = password - - conn, err := pgx.ConnectConfig(ctx, cfg) - if err != nil { - return nil, fmt.Errorf("connecting to %s/%s: %w", host, dbName, err) - } - return &dbRepo{conn: conn}, nil -} - -// ExecGrants applies all privilege grants needed for the RW role on a single database. -// GRANT ON ALL TABLES/SEQUENCES covers existing objects; ALTER DEFAULT PRIVILEGES covers -// future ones created by the admin role (e.g. via migrations). -func (r *dbRepo) ExecGrants(ctx context.Context, dbName string) error { - defer r.conn.Close(context.Background()) - - adminRole := adminRoleName(dbName) - rwRole := rwRoleName(dbName) - - tx, err := r.conn.Begin(ctx) - if err != nil { - return fmt.Errorf("beginning transaction: %w", err) - } - - // Identifiers cannot be parameterised in PostgreSQL — fmt.Sprintf is correct here. - // These names are generated internally by our own functions, never from user input. 
- stmts := []string{ - fmt.Sprintf("GRANT CONNECT ON DATABASE %s TO %s", dbName, rwRole), - fmt.Sprintf("GRANT USAGE ON SCHEMA public TO %s", rwRole), - fmt.Sprintf("GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO %s", rwRole), - fmt.Sprintf("GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO %s", rwRole), - fmt.Sprintf("ALTER DEFAULT PRIVILEGES FOR ROLE %s IN SCHEMA public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO %s", adminRole, rwRole), - fmt.Sprintf("ALTER DEFAULT PRIVILEGES FOR ROLE %s IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO %s", adminRole, rwRole), - } - - for _, stmt := range stmts { - if _, err := tx.Exec(ctx, stmt); err != nil { - return fmt.Errorf("executing grant %q: %w", stmt, err) - } - } - - return tx.Commit(ctx) -} - -// hasNewDatabases returns true when spec contains a database not yet present in status. -// Used to skip the grants phase when a spec change is unrelated to the database set — -// grants only need to run when a new database is introduced, not on every spec update. -func hasNewDatabases(postgresDB *enterprisev4.PostgresDatabase) bool { - existing := make(map[string]bool, len(postgresDB.Status.Databases)) - for _, dbInfo := range postgresDB.Status.Databases { - existing[dbInfo.Name] = true - } - for _, dbSpec := range postgresDB.Spec.Databases { - if !existing[dbSpec.Name] { - return true - } - } - return false -} - -// reconcileRWRolePrivileges ensures the RW role has database-level access. -// CNPG owns role existence, not privileges — the RW role can authenticate but gets -// "permission denied" on every query until these grants are applied. 
-func reconcileRWRolePrivileges( - ctx context.Context, - rwHost string, - superPassword string, - dbNames []string, -) error { - logger := log.FromContext(ctx) - - var errs []error - for _, dbName := range dbNames { - db, err := newDBRepo(ctx, rwHost, dbName, superPassword) - if err != nil { - logger.Error(err, "Failed to connect to database", "database", dbName) - errs = append(errs, fmt.Errorf("database %s: %w", dbName, err)) - continue - } - if err := db.ExecGrants(ctx, dbName); err != nil { - logger.Error(err, "Failed to grant RW role privileges", "database", dbName) - errs = append(errs, fmt.Errorf("database %s: %w", dbName, err)) - continue - } - logger.Info("RW role privileges granted", "database", dbName, "rwRole", rwRoleName(dbName)) - } - - return stderrors.Join(errs...) -} - -// dbConnectTimeout caps how long we wait for the primary to accept a connection. -// A hung primary must not stall the reconcile goroutine indefinitely. -const dbConnectTimeout = 10 * time.Second - -// roleSecretName gives both secret creation and status wiring a single source of truth -// for naming — eliminating any risk of the two sides drifting out of sync. -func roleSecretName(postgresDBName, dbName, role string) string { - return fmt.Sprintf("%s-%s-%s", postgresDBName, dbName, role) -} - -func adminRoleName(dbName string) string { return dbName + "_admin" } - -func rwRoleName(dbName string) string { return dbName + "_rw" } - -func cnpgDatabaseName(postgresDBName, dbName string) string { - return fmt.Sprintf("%s-%s", postgresDBName, dbName) -} - -// generatePassword uses crypto/rand (via sethvargo/go-password) rather than math/rand -// because these credentials protect live database access — predictability is unacceptable. 
-func generatePassword() (string, error) { - return password.Generate(passwordLength, passwordDigits, passwordSymbols, false, true) -} - -// reconcileUserSecrets ensures admin and rw secrets exist for each database, -// delegating per-secret logic to ensureSecret. -func (r *PostgresDatabaseReconciler) reconcileUserSecrets( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, -) error { - for _, dbSpec := range postgresDB.Spec.Databases { - if err := r.ensureSecret(ctx, postgresDB, adminRoleName(dbSpec.Name), - roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleAdmin)); err != nil { - return err - } - if err := r.ensureSecret(ctx, postgresDB, rwRoleName(dbSpec.Name), - roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleRW)); err != nil { - return err - } - } - return nil -} - -// ensureSecret handles three states: missing (create), orphaned (re-adopt), or existing (no-op). -// Intentionally not using CreateOrUpdate because secrets must never be updated after creation. -// Rotating a password here would break live connections before the application picks up the change. -func (r *PostgresDatabaseReconciler) ensureSecret( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - roleName, secretName string, -) error { - secret, err := r.getSecret(ctx, postgresDB.Namespace, secretName) - if err != nil { - return err - } - logger := log.FromContext(ctx) - - switch { - case secret == nil: - logger.Info("Creating missing user secret", "name", secretName) - return r.createUserSecret(ctx, postgresDB, roleName, secretName) - case secret.Annotations[annotationRetainedFrom] == postgresDB.Name: - logger.Info("Re-adopting orphaned secret", "name", secretName) - return r.adoptResource(ctx, postgresDB, secret) - } - return nil -} - -// getSecret fetches a Secret by name, returning nil if not found. -// Non-NotFound errors are treated as real failures — a transient API error -// must not cause a spurious Create attempt. 
-func (r *PostgresDatabaseReconciler) getSecret(ctx context.Context, namespace, name string) (*corev1.Secret, error) { - logger := log.FromContext(ctx) - - secret := &corev1.Secret{} - err := r.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, secret) - if errors.IsNotFound(err) { - return nil, nil - } - if err != nil { - logger.Error(err, "Failed to check secret existence", "secret", name) - return nil, err - } - return secret, nil -} - -// createUserSecret generates a password, builds the Secret, and creates it. -// AlreadyExists is treated as success — safe to retry after a partial failure. -func (r *PostgresDatabaseReconciler) createUserSecret( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - roleName string, - secretName string, -) error { - logger := log.FromContext(ctx) - - password, err := generatePassword() - if err != nil { - logger.Error(err, "Failed to generate password", "secret", secretName) - return err - } - - secret := buildPasswordSecret(postgresDB, secretName, roleName, password) - if err := controllerutil.SetControllerReference(postgresDB, secret, r.Scheme); err != nil { - return fmt.Errorf("failed to set owner reference on Secret %s: %w", secretName, err) - } - if err := r.Create(ctx, secret); err != nil { - if errors.IsAlreadyExists(err) { - return nil - } - logger.Error(err, "Failed to create secret", "secret", secretName) - return err - } - return nil -} - -// buildPasswordSecret constructs the Secret object with "username" and "password" keys required by CNPG. -// OwnerRef is set by the caller. 
-func buildPasswordSecret(postgresDB *enterprisev4.PostgresDatabase, secretName, roleName, password string) *corev1.Secret { - return &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: postgresDB.Namespace, - Labels: map[string]string{ - labelManagedBy: "splunk-operator", - labelCNPGReload: "true", - }, - }, - Data: map[string][]byte{ - "username": []byte(roleName), - "password": []byte(password), - }, - } -} - -// configMapName mirrors roleSecretName() so creation and status wiring share one source of truth. -func configMapName(postgresDBName, dbName string) string { - return fmt.Sprintf("%s-%s-config", postgresDBName, dbName) -} - -// clusterEndpoints holds fully-resolved connection hostnames for a cluster. -// PoolerRWHost and PoolerROHost are empty when connection pooling is disabled. -type clusterEndpoints struct { - RWHost string - ROHost string - PoolerRWHost string - PoolerROHost string -} - -func resolveClusterEndpoints(cluster *enterprisev4.PostgresCluster, cnpgCluster *cnpgv1.Cluster, namespace string) clusterEndpoints { - // FQDN so consumers in other namespaces can resolve without extra config. - endpoints := clusterEndpoints{ - RWHost: fmt.Sprintf("%s.%s.svc.cluster.local", cnpgCluster.Status.WriteService, namespace), - ROHost: fmt.Sprintf("%s.%s.svc.cluster.local", cnpgCluster.Status.ReadService, namespace), - } - // Pooler service names follow the pattern set by postgrescluster_controller: {cnpgClusterName}-pooler-{rw|ro}. - if cluster.Status.ConnectionPoolerStatus != nil && cluster.Status.ConnectionPoolerStatus.Enabled { - endpoints.PoolerRWHost = fmt.Sprintf("%s-pooler-%s.%s.svc.cluster.local", cnpgCluster.Name, readWriteEndpoint, namespace) - endpoints.PoolerROHost = fmt.Sprintf("%s-pooler-%s.%s.svc.cluster.local", cnpgCluster.Name, readOnlyEndpoint, namespace) - } - return endpoints -} - -// buildDatabaseConfigMapBody is a pure function — no API calls, no decisions about which -// endpoints exist. 
All that is resolved upstream and encoded in endpoints before this is called. -func buildDatabaseConfigMapBody( - dbName string, - endpoints clusterEndpoints, -) map[string]string { - data := map[string]string{ - "dbname": dbName, - "port": postgresPort, - "rw-host": endpoints.RWHost, - "ro-host": endpoints.ROHost, - "admin-user": adminRoleName(dbName), - "rw-user": rwRoleName(dbName), - } - // Pooler keys are only written when pooling is active - if endpoints.PoolerRWHost != "" { - data["pooler-rw-host"] = endpoints.PoolerRWHost - } - if endpoints.PoolerROHost != "" { - data["pooler-ro-host"] = endpoints.PoolerROHost - } - return data -} - -// reconcileRoleConfigMaps mirrors reconcileUserSecrets: checks per-database, -// creates only what is absent. Endpoints are resolved by the caller so this function -// has a single responsibility: iteration and existence-gated creation. -// Orphaned ConfigMaps from a previous retain-deletion are re-adopted. -func (r *PostgresDatabaseReconciler) reconcileRoleConfigMaps( - ctx context.Context, - postgresDB *enterprisev4.PostgresDatabase, - endpoints clusterEndpoints, -) error { - logger := log.FromContext(ctx) - - for _, dbSpec := range postgresDB.Spec.Databases { - cmName := configMapName(postgresDB.Name, dbSpec.Name) - - cm := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: cmName, - Namespace: postgresDB.Namespace, - Labels: map[string]string{ - labelManagedBy: "splunk-operator", - }, - }, - } - - _, err := controllerutil.CreateOrUpdate(ctx, r.Client, cm, func() error { - cm.Data = buildDatabaseConfigMapBody(dbSpec.Name, endpoints) - - // Set ownerRef on creation or re-adoption (orphaned objects have no ownerRef). 
- reAdopting := cm.Annotations[annotationRetainedFrom] == postgresDB.Name - if reAdopting { - logger.Info("Re-adopting orphaned ConfigMap", "name", cmName) - delete(cm.Annotations, annotationRetainedFrom) - } - if cm.CreationTimestamp.IsZero() || reAdopting { - if err := controllerutil.SetControllerReference(postgresDB, cm, r.Scheme); err != nil { - logger.Error(err, "Failed to set owner reference on ConfigMap", "configmap", cm.Name) - return err - } - } - return nil - }) - if err != nil { - logger.Error(err, "failed to create or update database configmap", "db", postgresDB.Name, "configmap", cmName) - return fmt.Errorf("failed to create or update database configmap %s: %w", cmName, err) - } - } - return nil -} - -// populateDatabaseStatus derives all secret ref names via roleSecretName() — the same function -// used during creation — so status refs are always consistent with actual secret names. -// Recomputing from spec rather than reading live secret names keeps this side-effect free. 
-func populateDatabaseStatus(postgresDB *enterprisev4.PostgresDatabase) []enterprisev4.DatabaseInfo { - databases := make([]enterprisev4.DatabaseInfo, 0, len(postgresDB.Spec.Databases)) - for _, dbSpec := range postgresDB.Spec.Databases { - databases = append(databases, enterprisev4.DatabaseInfo{ - Name: dbSpec.Name, - Ready: true, - AdminUserSecretRef: &corev1.LocalObjectReference{ - Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleAdmin), - }, - RWUserSecretRef: &corev1.LocalObjectReference{ - Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleRW), - }, - ConfigMapRef: &corev1.LocalObjectReference{ - Name: configMapName(postgresDB.Name, dbSpec.Name), - }, - }) - } - return databases -} diff --git a/internal/controller/postgresoperator_common_types.go b/internal/controller/postgresoperator_common_types.go deleted file mode 100644 index 984b51134..000000000 --- a/internal/controller/postgresoperator_common_types.go +++ /dev/null @@ -1,136 +0,0 @@ -package controller - -import ( - corev1 "k8s.io/api/core/v1" - "time" -) - -// This struct is used to compare the merged configuration from PostgresClusterClass and PostgresClusterSpec -// in a normalized way, and not to use CNPG-default values which are causing false positive diff state while reconciliation loop. -// It contains only the fields that are relevant for our reconciliation and that we want to compare when deciding whether to update the CNPG Cluster spec or not. 
-type normalizedCNPGClusterSpec struct { - ImageName string - Instances int - // Parameters we set, instead of complete spec from CNPG - CustomDefinedParameters map[string]string - PgHBA []string - DefaultDatabase string - Owner string - StorageSize string - Resources corev1.ResourceRequirements -} - -type reconcileDBPhases string -type reconcileClusterPhases string -type conditionTypes string -type conditionReasons string -type clusterReadyStatus string -type objectKind string - -const ( - // retryDelay is the default requeue interval when waiting on external state (CNPG, cluster). - retryDelay = time.Second * 15 - deletionPolicyRetain string = "Retain" - - // clusterNotFoundRetryDelay is longer than retryDelay — a missing cluster is unlikely - // to appear in 15 s and hammering the API is wasteful. - clusterNotFoundRetryDelay = time.Second * 30 - // cluster endpoint suffixes - readOnlyEndpoint string = "ro" - readWriteEndpoint string = "rw" - // default database name - defaultDatabaseName string = "postgres" - postgresDatabaseFinalizerName string = "postgresdatabases.enterprise.splunk.com/finalizer" - - annotationRetainedFrom string = "enterprise.splunk.com/retained-from" - - defaultSecretSuffix string = "-secret" - - defaultPoolerSuffix string = "-pooler-" - defaultConfigMapSuffix string = "-configmap" - defaultPort string = "5432" - superUsername string = "postgres" - postgresClusterFinalizerName string = "postgresclusters.enterprise.splunk.com/finalizer" - clusterDeletionPolicyDelete string = "Delete" - clusterDeletionPolicyRetain string = "Retain" - - // phases - readyDBPhase reconcileDBPhases = "Ready" - pendingDBPhase reconcileDBPhases = "Pending" - provisioningDBPhase reconcileDBPhases = "Provisioning" - failedDBPhase reconcileDBPhases = "Failed" - - // cluster phases - readyClusterPhase reconcileClusterPhases = "Ready" - pendingClusterPhase reconcileClusterPhases = "Pending" - provisioningClusterPhase reconcileClusterPhases = "Provisioning" - 
configuringClusterPhase reconcileClusterPhases = "Configuring" - failedClusterPhase reconcileClusterPhases = "Failed" - - // Condition types - clusterReady conditionTypes = "ClusterReady" - poolerReady conditionTypes = "PoolerReady" - rolesReady conditionTypes = "RolesReady" - databasesReady conditionTypes = "DatabasesReady" - secretsReady conditionTypes = "SecretsReady" - configMapsReady conditionTypes = "ConfigMapsReady" - configMapReady conditionTypes = "ConfigMapReady" - privilegesReady conditionTypes = "PrivilegesReady" - - // Condition reasons - reasonClusterNotFound conditionReasons = "ClusterNotFound" - reasonClusterProvisioning conditionReasons = "ClusterProvisioning" - reasonClusterInfoFetchFailed conditionReasons = "ClusterInfoFetchNotPossible" - reasonClusterAvailable conditionReasons = "ClusterAvailable" - reasonDatabasesAvailable conditionReasons = "DatabasesAvailable" - reasonSecretsCreated conditionReasons = "SecretsCreated" - reasonSecretsCreationFailed conditionReasons = "SecretsCreationFailed" - reasonWaitingForCNPG conditionReasons = "WaitingForCNPG" - reasonUsersCreationFailed conditionReasons = "UsersCreationFailed" - reasonUsersAvailable conditionReasons = "UsersAvailable" - reasonRoleConflict conditionReasons = "RoleConflict" - reasonSuperUserSecretFailed conditionReasons = "SuperUserSecretFailed" - reasonConfigMapsCreationFailed conditionReasons = "ConfigMapsCreationFailed" - reasonConfigMapsCreated conditionReasons = "ConfigMapsCreated" - reasonPrivilegesGranted conditionReasons = "PrivilegesGranted" - reasonPrivilegesGrantFailed conditionReasons = "PrivilegesGrantFailed" - - // Additional condition reasons for clusterReady conditionType - reasonClusterClassNotFound conditionReasons = "ClusterClassNotFound" - reasonManagedRolesFailed conditionReasons = "ManagedRolesReconciliationFailed" - reasonClusterBuildFailed conditionReasons = "ClusterBuildFailed" - reasonClusterBuildSucceeded conditionReasons = "ClusterBuildSucceeded" - 
reasonClusterGetFailed conditionReasons = "ClusterGetFailed" - reasonClusterPatchFailed conditionReasons = "ClusterPatchFailed" - reasonInvalidConfiguration conditionReasons = "InvalidConfiguration" - reasonConfigMapFailed conditionReasons = "ConfigMapReconciliationFailed" - reasonUserSecretFailed conditionReasons = "UserSecretReconciliationFailed" - - // Additional condition reasons for poolerReady conditionType - reasonPoolerReconciliationFailed conditionReasons = "PoolerReconciliationFailed" - reasonPoolerConfigMissing conditionReasons = "PoolerConfigMissing" - reasonPoolerCreating conditionReasons = "PoolerCreating" - reasonAllInstancesReady conditionReasons = "AllInstancesReady" - - // Additional condition reasons for mapping CNPG cluster statuses - reasonCNPGClusterHealthy conditionReasons = "CNPGClusterHealthy" - reasonCNPGProvisioning conditionReasons = "CNPGClusterProvisioning" - reasonCNPGSwitchover conditionReasons = "CNPGSwitchover" - reasonCNPGFailingOver conditionReasons = "CNPGFailingOver" - reasonCNPGRestarting conditionReasons = "CNPGRestarting" - reasonCNPGUpgrading conditionReasons = "CNPGUpgrading" - reasonCNPGApplyingConfig conditionReasons = "CNPGApplyingConfiguration" - reasonCNPGPromoting conditionReasons = "CNPGPromoting" - reasonCNPGWaitingForUser conditionReasons = "CNPGWaitingForUser" - reasonCNPGUnrecoverable conditionReasons = "CNPGUnrecoverable" - reasonCNPGProvisioningFailed conditionReasons = "CNPGProvisioningFailed" - reasonCNPGPluginError conditionReasons = "CNPGPluginError" - reasonCNPGImageError conditionReasons = "CNPGImageError" - reasonClusterDeleteFailed conditionReasons = "ClusterDeleteFailed" - - // Cluster status - ClusterNotFound clusterReadyStatus = "NotFound" - ClusterNotReady clusterReadyStatus = "NotReady" - ClusterNoProvisionerRef clusterReadyStatus = "NoProvisionerRef" - ClusterReady clusterReadyStatus = "Ready" -) diff --git a/pkg/postgresql/cluster/core/cluster.go b/pkg/postgresql/cluster/core/cluster.go new file 
mode 100644 index 000000000..66622d8ad --- /dev/null +++ b/pkg/postgresql/cluster/core/cluster.go @@ -0,0 +1,1006 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import ( + "context" + "fmt" + + cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + password "github.com/sethvargo/go-password/password" + enterprisev4 "github.com/splunk/splunk-operator/api/v4" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + log "sigs.k8s.io/controller-runtime/pkg/log" +) + +// PostgresClusterService is the application service entry point called by the primary adapter (reconciler). +func PostgresClusterService(ctx context.Context, c client.Client, scheme *runtime.Scheme, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + logger.Info("Reconciling PostgresCluster", "name", req.Name, "namespace", req.Namespace) + + var cnpgCluster *cnpgv1.Cluster + var poolerEnabled bool + var postgresSecretName string + secret := &corev1.Secret{} + + // 1. Fetch the PostgresCluster instance, stop if not found. 
+ postgresCluster := &enterprisev4.PostgresCluster{} + if err := c.Get(ctx, req.NamespacedName, postgresCluster); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("PostgresCluster deleted, skipping reconciliation") + return ctrl.Result{}, nil + } + logger.Error(err, "Unable to fetch PostgresCluster") + return ctrl.Result{}, err + } + if postgresCluster.Status.Resources == nil { + postgresCluster.Status.Resources = &enterprisev4.PostgresClusterResources{} + } + + updateStatus := func(conditionType conditionTypes, status metav1.ConditionStatus, reason conditionReasons, message string, phase reconcileClusterPhases) error { + return setStatus(ctx, c, postgresCluster, conditionType, status, reason, message, phase) + } + + // Finalizer handling must come before any other processing. + if err := handleFinalizer(ctx, c, scheme, postgresCluster, secret); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("PostgresCluster already deleted, skipping finalizer update") + return ctrl.Result{}, nil + } + logger.Error(err, "Failed to handle finalizer") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterDeleteFailed, + fmt.Sprintf("Failed to delete resources during cleanup: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + if postgresCluster.GetDeletionTimestamp() != nil { + logger.Info("PostgresCluster is being deleted, cleanup complete") + return ctrl.Result{}, nil + } + + // Add finalizer if not present. 
+ if !controllerutil.ContainsFinalizer(postgresCluster, PostgresClusterFinalizerName) { + controllerutil.AddFinalizer(postgresCluster, PostgresClusterFinalizerName) + if err := c.Update(ctx, postgresCluster); err != nil { + if apierrors.IsConflict(err) { + logger.Info("Conflict while adding finalizer, will retry on next reconcile") + return ctrl.Result{Requeue: true}, nil + } + logger.Error(err, "Failed to add finalizer to PostgresCluster") + return ctrl.Result{}, fmt.Errorf("failed to add finalizer: %w", err) + } + logger.Info("Finalizer added successfully") + return ctrl.Result{}, nil + } + + // 2. Load the referenced PostgresClusterClass. + clusterClass := &enterprisev4.PostgresClusterClass{} + if err := c.Get(ctx, client.ObjectKey{Name: postgresCluster.Spec.Class}, clusterClass); err != nil { + logger.Error(err, "Unable to fetch referenced PostgresClusterClass", "className", postgresCluster.Spec.Class) + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterClassNotFound, + fmt.Sprintf("ClusterClass %s not found: %v", postgresCluster.Spec.Class, err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + + // 3. Merge PostgresClusterSpec on top of PostgresClusterClass defaults. + mergedConfig, err := getMergedConfig(clusterClass, postgresCluster) + if err != nil { + logger.Error(err, "Failed to merge PostgresCluster configuration") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonInvalidConfiguration, + fmt.Sprintf("Failed to merge configuration: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + + // 4. Resolve or derive the superuser secret name. 
+ if postgresCluster.Status.Resources != nil && postgresCluster.Status.Resources.SuperUserSecretRef != nil { + postgresSecretName = postgresCluster.Status.Resources.SuperUserSecretRef.Name + logger.Info("Using existing secret from status", "name", postgresSecretName) + } else { + postgresSecretName = fmt.Sprintf("%s%s", postgresCluster.Name, defaultSecretSuffix) + logger.Info("Generating new secret name", "name", postgresSecretName) + } + + secretExists, secretErr := clusterSecretExists(ctx, c, postgresCluster.Namespace, postgresSecretName, secret) + if secretErr != nil { + logger.Error(secretErr, "Failed to check if PostgresCluster secret exists", "name", postgresSecretName) + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonUserSecretFailed, + fmt.Sprintf("Failed to check secret existence: %v", secretErr), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, secretErr + } + if !secretExists { + logger.Info("Creating PostgresCluster secret", "name", postgresSecretName) + if err := ensureClusterSecret(ctx, c, scheme, postgresCluster, postgresSecretName, secret); err != nil { + logger.Error(err, "Failed to ensure PostgresCluster secret", "name", postgresSecretName) + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonUserSecretFailed, + fmt.Sprintf("Failed to generate PostgresCluster secret: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + if err := c.Status().Update(ctx, postgresCluster); err != nil { + if apierrors.IsConflict(err) { + logger.Info("Conflict after secret creation, will requeue") + return ctrl.Result{Requeue: true}, nil + } + logger.Error(err, "Failed to update status after secret creation") + return ctrl.Result{}, err + } + logger.Info("SuperUserSecretRef persisted to status") + } + + // Re-attach ownerRef if it was stripped (e.g. 
by a Retain-policy deletion of a previous cluster). + hasOwnerRef, ownerRefErr := controllerutil.HasOwnerReference(secret.GetOwnerReferences(), postgresCluster, scheme) + if ownerRefErr != nil { + logger.Error(ownerRefErr, "Failed to check owner reference on Secret") + return ctrl.Result{}, fmt.Errorf("failed to check owner reference on secret: %w", ownerRefErr) + } + if secretExists && !hasOwnerRef { + logger.Info("Connecting existing secret to PostgresCluster by adding owner reference", "name", postgresSecretName) + originalSecret := secret.DeepCopy() + if err := ctrl.SetControllerReference(postgresCluster, secret, scheme); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to set controller reference on existing secret: %w", err) + } + if err := patchObject(ctx, c, originalSecret, secret, "Secret"); err != nil { + logger.Error(err, "Failed to patch existing secret with controller reference") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonSuperUserSecretFailed, + fmt.Sprintf("Failed to patch existing secret: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + logger.Info("Existing secret linked successfully") + } + + if postgresCluster.Status.Resources.SuperUserSecretRef == nil { + postgresCluster.Status.Resources.SuperUserSecretRef = &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: postgresSecretName}, + Key: secretKeyPassword, + } + } + + // 5. Build desired CNPG Cluster spec. + desiredSpec := buildCNPGClusterSpec(mergedConfig, postgresSecretName) + + // 6. Fetch existing CNPG Cluster or create it. 
+ existingCNPG := &cnpgv1.Cluster{} + err = c.Get(ctx, types.NamespacedName{Name: postgresCluster.Name, Namespace: postgresCluster.Namespace}, existingCNPG) + switch { + case apierrors.IsNotFound(err): + logger.Info("CNPG Cluster not found, creating", "name", postgresCluster.Name) + newCluster := buildCNPGCluster(scheme, postgresCluster, mergedConfig, postgresSecretName) + if err := c.Create(ctx, newCluster); err != nil { + logger.Error(err, "Failed to create CNPG Cluster") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterBuildFailed, + fmt.Sprintf("Failed to create CNPG Cluster: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterBuildSucceeded, + "CNPG Cluster created", pendingClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + logger.Info("CNPG Cluster created successfully, requeueing for status update", "name", postgresCluster.Name) + return ctrl.Result{RequeueAfter: retryDelay}, nil + case err != nil: + logger.Error(err, "Failed to get CNPG Cluster") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterGetFailed, + fmt.Sprintf("Failed to get CNPG Cluster: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + + // 7. Patch CNPG Cluster spec if drift detected. 
+ cnpgCluster = existingCNPG + currentNormalized := normalizeCNPGClusterSpec(cnpgCluster.Spec, mergedConfig.Spec.PostgreSQLConfig) + desiredNormalized := normalizeCNPGClusterSpec(desiredSpec, mergedConfig.Spec.PostgreSQLConfig) + + if !equality.Semantic.DeepEqual(currentNormalized, desiredNormalized) { + logger.Info("Detected drift in CNPG Cluster spec, patching", "name", cnpgCluster.Name) + originalCluster := cnpgCluster.DeepCopy() + cnpgCluster.Spec = desiredSpec + + switch patchErr := patchObject(ctx, c, originalCluster, cnpgCluster, "CNPGCluster"); { + case apierrors.IsConflict(patchErr): + logger.Info("Conflict occurred while updating CNPG Cluster, requeueing", "name", cnpgCluster.Name) + return ctrl.Result{Requeue: true}, nil + case patchErr != nil: + logger.Error(patchErr, "Failed to patch CNPG Cluster", "name", cnpgCluster.Name) + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterPatchFailed, + fmt.Sprintf("Failed to patch CNPG Cluster: %v", patchErr), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, patchErr + default: + logger.Info("CNPG Cluster patched successfully, requeueing for status update", "name", cnpgCluster.Name) + return ctrl.Result{RequeueAfter: retryDelay}, nil + } + } + + // 7a. Reconcile ManagedRoles. + if err := reconcileManagedRoles(ctx, c, postgresCluster, cnpgCluster); err != nil { + logger.Error(err, "Failed to reconcile managed roles") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonManagedRolesFailed, + fmt.Sprintf("Failed to reconcile managed roles: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + + // 7b. Reconcile Connection Pooler. 
+ poolerEnabled = mergedConfig.Spec.ConnectionPoolerEnabled != nil && *mergedConfig.Spec.ConnectionPoolerEnabled + switch { + case !poolerEnabled: + if err := deleteConnectionPoolers(ctx, c, postgresCluster); err != nil { + logger.Error(err, "Failed to delete connection poolers") + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonPoolerReconciliationFailed, + fmt.Sprintf("Failed to delete connection poolers: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + postgresCluster.Status.ConnectionPoolerStatus = nil + meta.RemoveStatusCondition(&postgresCluster.Status.Conditions, string(poolerReady)) + + case !poolerExists(ctx, c, postgresCluster, readWriteEndpoint) || !poolerExists(ctx, c, postgresCluster, readOnlyEndpoint): + if mergedConfig.CNPG == nil || mergedConfig.CNPG.ConnectionPooler == nil { + logger.Info("Connection pooler enabled but no config found in class or cluster spec, skipping", + "class", postgresCluster.Spec.Class, "cluster", postgresCluster.Name) + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonPoolerConfigMissing, + fmt.Sprintf("Connection pooler is enabled but no config found in class %q or cluster %q", + postgresCluster.Spec.Class, postgresCluster.Name), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, nil + } + if cnpgCluster.Status.Phase != cnpgv1.PhaseHealthy { + logger.Info("CNPG Cluster not healthy yet, pending pooler creation", "clusterPhase", cnpgCluster.Status.Phase) + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonCNPGClusterNotHealthy, + "Waiting for CNPG cluster to become healthy before creating poolers", pendingClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{RequeueAfter: retryDelay}, nil + } + if err := createOrUpdateConnectionPoolers(ctx, c, 
scheme, postgresCluster, mergedConfig, cnpgCluster); err != nil { + logger.Error(err, "Failed to reconcile connection pooler") + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonPoolerReconciliationFailed, + fmt.Sprintf("Failed to reconcile connection pooler: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + logger.Info("Connection Poolers created, requeueing to check readiness") + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonPoolerCreating, + "Connection poolers are being provisioned", provisioningClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{RequeueAfter: retryDelay}, nil + + case !arePoolersReady(ctx, c, postgresCluster): + logger.Info("Connection Poolers are not ready yet, requeueing") + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonPoolerCreating, + "Connection poolers are being provisioned", pendingClusterPhase); statusErr != nil { + if apierrors.IsConflict(statusErr) { + logger.Info("Conflict updating pooler status, will requeue") + return ctrl.Result{Requeue: true}, nil + } + } + return ctrl.Result{RequeueAfter: retryDelay}, nil + + default: + if err := syncPoolerStatus(ctx, c, postgresCluster); err != nil { + logger.Error(err, "Failed to sync pooler status") + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonPoolerReconciliationFailed, + fmt.Sprintf("Failed to sync pooler status: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + } + + // 8. Reconcile ConfigMap when CNPG cluster is healthy. 
+ if cnpgCluster.Status.Phase == cnpgv1.PhaseHealthy { + logger.Info("CNPG Cluster is ready, reconciling ConfigMap for connection details") + desiredCM, err := generateConfigMap(ctx, c, scheme, postgresCluster, cnpgCluster, postgresSecretName) + if err != nil { + logger.Error(err, "Failed to generate ConfigMap") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonConfigMapFailed, + fmt.Sprintf("Failed to generate ConfigMap: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: desiredCM.Name, Namespace: desiredCM.Namespace}} + createOrUpdateResult, err := controllerutil.CreateOrUpdate(ctx, c, cm, func() error { + cm.Data = desiredCM.Data + cm.Annotations = desiredCM.Annotations + cm.Labels = desiredCM.Labels + if !metav1.IsControlledBy(cm, postgresCluster) { + if err := ctrl.SetControllerReference(postgresCluster, cm, scheme); err != nil { + return fmt.Errorf("set controller reference failed: %w", err) + } + } + return nil + }) + if err != nil { + logger.Error(err, "Failed to reconcile ConfigMap", "name", desiredCM.Name) + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonConfigMapFailed, + fmt.Sprintf("Failed to reconcile ConfigMap: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + switch createOrUpdateResult { + case controllerutil.OperationResultCreated: + logger.Info("ConfigMap created", "name", desiredCM.Name) + case controllerutil.OperationResultUpdated: + logger.Info("ConfigMap updated", "name", desiredCM.Name) + default: + logger.Info("ConfigMap unchanged", "name", desiredCM.Name) + } + if postgresCluster.Status.Resources.ConfigMapRef == nil { + postgresCluster.Status.Resources.ConfigMapRef = &corev1.LocalObjectReference{Name: desiredCM.Name} + } + } + + // 9. Final status sync. 
+ if err := syncStatus(ctx, c, postgresCluster, cnpgCluster); err != nil { + logger.Error(err, "Failed to sync status") + if apierrors.IsConflict(err) { + logger.Info("Conflict during status update, will requeue") + return ctrl.Result{Requeue: true}, nil + } + return ctrl.Result{}, fmt.Errorf("failed to sync status: %w", err) + } + if cnpgCluster.Status.Phase == cnpgv1.PhaseHealthy && arePoolersReady(ctx, c, postgresCluster) { + logger.Info("Poolers are ready, syncing pooler status") + _ = syncPoolerStatus(ctx, c, postgresCluster) + } + logger.Info("Reconciliation complete") + return ctrl.Result{}, nil +} + +// getMergedConfig overlays PostgresCluster spec on top of the class defaults. +// Class values are used only where the cluster spec is silent. +func getMergedConfig(class *enterprisev4.PostgresClusterClass, cluster *enterprisev4.PostgresCluster) (*MergedConfig, error) { + result := cluster.Spec.DeepCopy() + + // Config is optional on the class — apply defaults only when provided. + if defaults := class.Spec.Config; defaults != nil { + if result.Instances == nil { + result.Instances = defaults.Instances + } + if result.PostgresVersion == nil { + result.PostgresVersion = defaults.PostgresVersion + } + if result.Resources == nil { + result.Resources = defaults.Resources + } + if result.Storage == nil { + result.Storage = defaults.Storage + } + if len(result.PostgreSQLConfig) == 0 { + result.PostgreSQLConfig = defaults.PostgreSQLConfig + } + if len(result.PgHBA) == 0 { + result.PgHBA = defaults.PgHBA + } + } + + if result.Instances == nil || result.PostgresVersion == nil || result.Storage == nil { + return nil, fmt.Errorf("invalid configuration for class %s: instances, postgresVersion and storage are required", class.Name) + } + if result.PostgreSQLConfig == nil { + result.PostgreSQLConfig = make(map[string]string) + } + if result.PgHBA == nil { + result.PgHBA = make([]string, 0) + } + if result.Resources == nil { + result.Resources = 
&corev1.ResourceRequirements{} + } + + return &MergedConfig{Spec: result, CNPG: class.Spec.CNPG}, nil +} + +// buildCNPGClusterSpec builds the desired CNPG ClusterSpec. +// IMPORTANT: any field added here must also appear in normalizeCNPGClusterSpec, +// otherwise spec drift will be silently ignored. +func buildCNPGClusterSpec(cfg *MergedConfig, secretName string) cnpgv1.ClusterSpec { + return cnpgv1.ClusterSpec{ + ImageName: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s", *cfg.Spec.PostgresVersion), + Instances: int(*cfg.Spec.Instances), + PostgresConfiguration: cnpgv1.PostgresConfiguration{ + Parameters: cfg.Spec.PostgreSQLConfig, + PgHBA: cfg.Spec.PgHBA, + }, + SuperuserSecret: &cnpgv1.LocalObjectReference{Name: secretName}, + EnableSuperuserAccess: ptr.To(true), + Bootstrap: &cnpgv1.BootstrapConfiguration{ + InitDB: &cnpgv1.BootstrapInitDB{ + Database: defaultDatabaseName, + Owner: superUsername, + Secret: &cnpgv1.LocalObjectReference{Name: secretName}, + }, + }, + StorageConfiguration: cnpgv1.StorageConfiguration{ + Size: cfg.Spec.Storage.String(), + }, + Resources: *cfg.Spec.Resources, + } +} + +func buildCNPGCluster(scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, cfg *MergedConfig, secretName string) *cnpgv1.Cluster { + cnpg := &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: cluster.Name, Namespace: cluster.Namespace}, + Spec: buildCNPGClusterSpec(cfg, secretName), + } + ctrl.SetControllerReference(cluster, cnpg, scheme) + return cnpg +} + +func normalizeCNPGClusterSpec(spec cnpgv1.ClusterSpec, customDefinedParameters map[string]string) normalizedCNPGClusterSpec { + normalized := normalizedCNPGClusterSpec{ + ImageName: spec.ImageName, + Instances: spec.Instances, + StorageSize: spec.StorageConfiguration.Size, + Resources: spec.Resources, + } + if len(customDefinedParameters) > 0 { + normalized.CustomDefinedParameters = make(map[string]string) + for k := range customDefinedParameters { + normalized.CustomDefinedParameters[k] = 
spec.PostgresConfiguration.Parameters[k] + } + } + if len(spec.PostgresConfiguration.PgHBA) > 0 { + normalized.PgHBA = spec.PostgresConfiguration.PgHBA + } + if spec.Bootstrap != nil && spec.Bootstrap.InitDB != nil { + normalized.DefaultDatabase = spec.Bootstrap.InitDB.Database + normalized.Owner = spec.Bootstrap.InitDB.Owner + } + return normalized +} + +// reconcileManagedRoles synchronizes ManagedRoles from PostgresCluster spec to CNPG Cluster managed.roles. +func reconcileManagedRoles(ctx context.Context, c client.Client, cluster *enterprisev4.PostgresCluster, cnpgCluster *cnpgv1.Cluster) error { + logger := log.FromContext(ctx) + + if len(cluster.Spec.ManagedRoles) == 0 { + logger.Info("No managed roles to reconcile") + return nil + } + + desiredRoles := make([]cnpgv1.RoleConfiguration, 0, len(cluster.Spec.ManagedRoles)) + for _, role := range cluster.Spec.ManagedRoles { + r := cnpgv1.RoleConfiguration{ + Name: role.Name, + Ensure: cnpgv1.EnsureAbsent, + } + // Exists bool replaces the old Ensure string enum ("present"/"absent"). + if role.Exists { + r.Ensure = cnpgv1.EnsurePresent + r.Login = true + } + if role.PasswordSecretRef != nil { + // Pass only the secret name to CNPG — CNPG always reads the "password" key. 
+ r.PasswordSecret = &cnpgv1.LocalObjectReference{Name: role.PasswordSecretRef.LocalObjectReference.Name} + } + desiredRoles = append(desiredRoles, r) + } + + var currentRoles []cnpgv1.RoleConfiguration + if cnpgCluster.Spec.Managed != nil { + currentRoles = cnpgCluster.Spec.Managed.Roles + } + + if equality.Semantic.DeepEqual(currentRoles, desiredRoles) { + logger.Info("CNPG Cluster roles already match desired state, no update needed") + return nil + } + + logger.Info("CNPG Cluster roles differ from desired state, updating", + "currentCount", len(currentRoles), "desiredCount", len(desiredRoles)) + + originalCluster := cnpgCluster.DeepCopy() + if cnpgCluster.Spec.Managed == nil { + cnpgCluster.Spec.Managed = &cnpgv1.ManagedConfiguration{} + } + cnpgCluster.Spec.Managed.Roles = desiredRoles + + if err := c.Patch(ctx, cnpgCluster, client.MergeFrom(originalCluster)); err != nil { + return fmt.Errorf("failed to patch CNPG Cluster with managed roles: %w", err) + } + logger.Info("Successfully updated CNPG Cluster with managed roles", "roleCount", len(desiredRoles)) + return nil +} + +func poolerResourceName(clusterName, poolerType string) string { + return fmt.Sprintf("%s%s%s", clusterName, defaultPoolerSuffix, poolerType) +} + +func poolerExists(ctx context.Context, c client.Client, cluster *enterprisev4.PostgresCluster, poolerType string) bool { + pooler := &cnpgv1.Pooler{} + err := c.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(cluster.Name, poolerType), + Namespace: cluster.Namespace, + }, pooler) + if apierrors.IsNotFound(err) { + return false + } + if err != nil { + log.FromContext(ctx).Error(err, "Failed to check pooler existence", "type", poolerType) + return false + } + return true +} + +func arePoolersReady(ctx context.Context, c client.Client, cluster *enterprisev4.PostgresCluster) bool { + rwPooler := &cnpgv1.Pooler{} + rwErr := c.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(cluster.Name, readWriteEndpoint), + Namespace: 
cluster.Namespace, + }, rwPooler) + + roPooler := &cnpgv1.Pooler{} + roErr := c.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(cluster.Name, readOnlyEndpoint), + Namespace: cluster.Namespace, + }, roPooler) + + return isPoolerReady(rwPooler, rwErr) && isPoolerReady(roPooler, roErr) +} + +// isPoolerReady checks if a pooler has all instances scheduled. +// CNPG PoolerStatus only tracks scheduled instances, not ready pods. +func isPoolerReady(pooler *cnpgv1.Pooler, err error) bool { + if err != nil { + return false + } + desired := int32(1) + if pooler.Spec.Instances != nil { + desired = *pooler.Spec.Instances + } + return pooler.Status.Instances >= desired +} + +func poolerInstanceCount(p *cnpgv1.Pooler) (desired, scheduled int32) { + desired = 1 + if p.Spec.Instances != nil { + desired = *p.Spec.Instances + } + return desired, p.Status.Instances +} + +// createOrUpdateConnectionPoolers creates RW and RO poolers if they don't exist. +func createOrUpdateConnectionPoolers(ctx context.Context, c client.Client, scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, cfg *MergedConfig, cnpgCluster *cnpgv1.Cluster) error { + if err := createConnectionPooler(ctx, c, scheme, cluster, cfg, cnpgCluster, readWriteEndpoint); err != nil { + return fmt.Errorf("failed to reconcile RW pooler: %w", err) + } + if err := createConnectionPooler(ctx, c, scheme, cluster, cfg, cnpgCluster, readOnlyEndpoint); err != nil { + return fmt.Errorf("failed to reconcile RO pooler: %w", err) + } + return nil +} + +func createConnectionPooler(ctx context.Context, c client.Client, scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, cfg *MergedConfig, cnpgCluster *cnpgv1.Cluster, poolerType string) error { + poolerName := poolerResourceName(cluster.Name, poolerType) + existing := &cnpgv1.Pooler{} + err := c.Get(ctx, types.NamespacedName{Name: poolerName, Namespace: cluster.Namespace}, existing) + if err == nil { + return nil // already exists + } + if 
!apierrors.IsNotFound(err) { + return err + } + log.FromContext(ctx).Info("Creating CNPG Pooler", "name", poolerName, "type", poolerType) + return c.Create(ctx, buildCNPGPooler(scheme, cluster, cfg, cnpgCluster, poolerType)) +} + +func buildCNPGPooler(scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, cfg *MergedConfig, cnpgCluster *cnpgv1.Cluster, poolerType string) *cnpgv1.Pooler { + pc := cfg.CNPG.ConnectionPooler + instances := *pc.Instances + mode := cnpgv1.PgBouncerPoolMode(*pc.Mode) + pooler := &cnpgv1.Pooler{ + ObjectMeta: metav1.ObjectMeta{Name: poolerResourceName(cluster.Name, poolerType), Namespace: cluster.Namespace}, + Spec: cnpgv1.PoolerSpec{ + Cluster: cnpgv1.LocalObjectReference{Name: cnpgCluster.Name}, + Instances: &instances, + Type: cnpgv1.PoolerType(poolerType), + PgBouncer: &cnpgv1.PgBouncerSpec{ + PoolMode: mode, + Parameters: pc.Config, + }, + }, + } + ctrl.SetControllerReference(cluster, pooler, scheme) + return pooler +} + +// deleteConnectionPoolers removes RW and RO poolers if they exist. +func deleteConnectionPoolers(ctx context.Context, c client.Client, cluster *enterprisev4.PostgresCluster) error { + logger := log.FromContext(ctx) + for _, poolerType := range []string{readWriteEndpoint, readOnlyEndpoint} { + poolerName := poolerResourceName(cluster.Name, poolerType) + if !poolerExists(ctx, c, cluster, poolerType) { + continue + } + pooler := &cnpgv1.Pooler{} + if err := c.Get(ctx, types.NamespacedName{Name: poolerName, Namespace: cluster.Namespace}, pooler); err != nil { + if apierrors.IsNotFound(err) { + continue + } + return fmt.Errorf("failed to get pooler %s: %w", poolerName, err) + } + logger.Info("Deleting CNPG Pooler", "name", poolerName) + if err := c.Delete(ctx, pooler); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete pooler %s: %w", poolerName, err) + } + } + return nil +} + +// syncPoolerStatus populates ConnectionPoolerStatus and the PoolerReady condition. 
+func syncPoolerStatus(ctx context.Context, c client.Client, cluster *enterprisev4.PostgresCluster) error { + rwPooler := &cnpgv1.Pooler{} + if err := c.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(cluster.Name, readWriteEndpoint), + Namespace: cluster.Namespace, + }, rwPooler); err != nil { + return err + } + + roPooler := &cnpgv1.Pooler{} + if err := c.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(cluster.Name, readOnlyEndpoint), + Namespace: cluster.Namespace, + }, roPooler); err != nil { + return err + } + + cluster.Status.ConnectionPoolerStatus = &enterprisev4.ConnectionPoolerStatus{Enabled: true} + rwDesired, rwScheduled := poolerInstanceCount(rwPooler) + roDesired, roScheduled := poolerInstanceCount(roPooler) + + return setStatus(ctx, c, cluster, poolerReady, metav1.ConditionTrue, reasonAllInstancesReady, + fmt.Sprintf("%s: %d/%d, %s: %d/%d", readWriteEndpoint, rwScheduled, rwDesired, readOnlyEndpoint, roScheduled, roDesired), + readyClusterPhase) +} + +// syncStatus maps CNPG Cluster state to PostgresCluster status. 
+func syncStatus(ctx context.Context, c client.Client, cluster *enterprisev4.PostgresCluster, cnpgCluster *cnpgv1.Cluster) error { + cluster.Status.ProvisionerRef = &corev1.ObjectReference{ + APIVersion: "postgresql.cnpg.io/v1", + Kind: "Cluster", + Namespace: cnpgCluster.Namespace, + Name: cnpgCluster.Name, + UID: cnpgCluster.UID, + } + + var phase reconcileClusterPhases + var condStatus metav1.ConditionStatus + var reason conditionReasons + var message string + + switch cnpgCluster.Status.Phase { + case cnpgv1.PhaseHealthy: + phase, condStatus, reason, message = readyClusterPhase, metav1.ConditionTrue, reasonCNPGClusterHealthy, "Cluster is up and running" + case cnpgv1.PhaseFirstPrimary, cnpgv1.PhaseCreatingReplica, cnpgv1.PhaseWaitingForInstancesToBeActive: + phase, condStatus, reason = provisioningClusterPhase, metav1.ConditionFalse, reasonCNPGProvisioning + message = fmt.Sprintf("CNPG cluster provisioning: %s", cnpgCluster.Status.Phase) + case cnpgv1.PhaseSwitchover: + phase, condStatus, reason, message = configuringClusterPhase, metav1.ConditionFalse, reasonCNPGSwitchover, "Cluster changing primary node" + case cnpgv1.PhaseFailOver: + phase, condStatus, reason, message = configuringClusterPhase, metav1.ConditionFalse, reasonCNPGFailingOver, "Pod missing, need to change primary" + case cnpgv1.PhaseInplacePrimaryRestart, cnpgv1.PhaseInplaceDeletePrimaryRestart: + phase, condStatus, reason = configuringClusterPhase, metav1.ConditionFalse, reasonCNPGRestarting + message = fmt.Sprintf("CNPG cluster restarting: %s", cnpgCluster.Status.Phase) + case cnpgv1.PhaseUpgrade, cnpgv1.PhaseMajorUpgrade, cnpgv1.PhaseUpgradeDelayed, cnpgv1.PhaseOnlineUpgrading: + phase, condStatus, reason = configuringClusterPhase, metav1.ConditionFalse, reasonCNPGUpgrading + message = fmt.Sprintf("CNPG cluster upgrading: %s", cnpgCluster.Status.Phase) + case cnpgv1.PhaseApplyingConfiguration: + phase, condStatus, reason, message = configuringClusterPhase, metav1.ConditionFalse, 
reasonCNPGApplyingConfig, "Configuration change is being applied" + case cnpgv1.PhaseReplicaClusterPromotion: + phase, condStatus, reason, message = configuringClusterPhase, metav1.ConditionFalse, reasonCNPGPromoting, "Replica is being promoted to primary" + case cnpgv1.PhaseWaitingForUser: + phase, condStatus, reason, message = failedClusterPhase, metav1.ConditionFalse, reasonCNPGWaitingForUser, "Action from the user is required" + case cnpgv1.PhaseUnrecoverable: + phase, condStatus, reason, message = failedClusterPhase, metav1.ConditionFalse, reasonCNPGUnrecoverable, "Cluster failed, needs manual intervention" + case cnpgv1.PhaseCannotCreateClusterObjects: + phase, condStatus, reason, message = failedClusterPhase, metav1.ConditionFalse, reasonCNPGProvisioningFailed, "Cluster resources cannot be created" + case cnpgv1.PhaseUnknownPlugin, cnpgv1.PhaseFailurePlugin: + phase, condStatus, reason = failedClusterPhase, metav1.ConditionFalse, reasonCNPGPluginError + message = fmt.Sprintf("CNPG plugin error: %s", cnpgCluster.Status.Phase) + case cnpgv1.PhaseImageCatalogError, cnpgv1.PhaseArchitectureBinaryMissing: + phase, condStatus, reason = failedClusterPhase, metav1.ConditionFalse, reasonCNPGImageError + message = fmt.Sprintf("CNPG image error: %s", cnpgCluster.Status.Phase) + case "": + phase, condStatus, reason, message = pendingClusterPhase, metav1.ConditionFalse, reasonCNPGProvisioning, "CNPG cluster is pending creation" + default: + phase, condStatus, reason = provisioningClusterPhase, metav1.ConditionFalse, reasonCNPGProvisioning + message = fmt.Sprintf("CNPG cluster phase: %s", cnpgCluster.Status.Phase) + } + + return setStatus(ctx, c, cluster, clusterReady, condStatus, reason, message, phase) +} + +// setStatus sets the phase, condition and persists the status. 
+func setStatus(ctx context.Context, c client.Client, cluster *enterprisev4.PostgresCluster, condType conditionTypes, status metav1.ConditionStatus, reason conditionReasons, message string, phase reconcileClusterPhases) error { + p := string(phase) + cluster.Status.Phase = &p + meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ + Type: string(condType), + Status: status, + Reason: string(reason), + Message: message, + ObservedGeneration: cluster.Generation, + }) + if err := c.Status().Update(ctx, cluster); err != nil { + return fmt.Errorf("failed to update PostgresCluster status: %w", err) + } + return nil +} + +// generateConfigMap builds a ConfigMap with connection details for the PostgresCluster. +func generateConfigMap(ctx context.Context, c client.Client, scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, cnpgCluster *cnpgv1.Cluster, secretName string) (*corev1.ConfigMap, error) { + cmName := fmt.Sprintf("%s%s", cluster.Name, defaultConfigMapSuffix) + if cluster.Status.Resources != nil && cluster.Status.Resources.ConfigMapRef != nil { + cmName = cluster.Status.Resources.ConfigMapRef.Name + } + + data := map[string]string{ + "CLUSTER_RW_ENDPOINT": fmt.Sprintf("%s-rw.%s", cnpgCluster.Name, cnpgCluster.Namespace), + "CLUSTER_RO_ENDPOINT": fmt.Sprintf("%s-ro.%s", cnpgCluster.Name, cnpgCluster.Namespace), + "CLUSTER_R_ENDPOINT": fmt.Sprintf("%s-r.%s", cnpgCluster.Name, cnpgCluster.Namespace), + "DEFAULT_CLUSTER_PORT": defaultPort, + "SUPER_USER_NAME": superUsername, + "SUPER_USER_SECRET_REF": secretName, + } + if poolerExists(ctx, c, cluster, readWriteEndpoint) && poolerExists(ctx, c, cluster, readOnlyEndpoint) { + data["CLUSTER_POOLER_RW_ENDPOINT"] = fmt.Sprintf("%s.%s", poolerResourceName(cnpgCluster.Name, readWriteEndpoint), cnpgCluster.Namespace) + data["CLUSTER_POOLER_RO_ENDPOINT"] = fmt.Sprintf("%s.%s", poolerResourceName(cnpgCluster.Name, readOnlyEndpoint), cnpgCluster.Namespace) + } + + cm := &corev1.ConfigMap{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: cmName, + Namespace: cluster.Namespace, + Labels: map[string]string{"app.kubernetes.io/managed-by": "postgrescluster-controller"}, + }, + Data: data, + } + if err := ctrl.SetControllerReference(cluster, cm, scheme); err != nil { + return nil, fmt.Errorf("failed to set controller reference: %w", err) + } + return cm, nil +} + +// ensureClusterSecret creates the superuser secret if it doesn't exist and persists the ref to status. +func ensureClusterSecret(ctx context.Context, c client.Client, scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, secretName string, secret *corev1.Secret) error { + err := c.Get(ctx, types.NamespacedName{Name: secretName, Namespace: cluster.Namespace}, secret) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + if apierrors.IsNotFound(err) { + pw, err := generatePassword() + if err != nil { + return err + } + newSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: secretName, Namespace: cluster.Namespace}, + StringData: map[string]string{"username": superUsername, "password": pw}, + Type: corev1.SecretTypeOpaque, + } + if err := ctrl.SetControllerReference(cluster, newSecret, scheme); err != nil { + return err + } + if err := c.Create(ctx, newSecret); err != nil { + return err + } + } + if cluster.Status.Resources == nil { + cluster.Status.Resources = &enterprisev4.PostgresClusterResources{} + } + cluster.Status.Resources.SuperUserSecretRef = &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: secretKeyPassword, + } + return nil +} + +func clusterSecretExists(ctx context.Context, c client.Client, namespace, name string, secret *corev1.Secret) (bool, error) { + err := c.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, secret) + if apierrors.IsNotFound(err) { + return false, nil + } + return err == nil, err +} + +// deleteCNPGCluster deletes the CNPG Cluster if it exists. 
+func deleteCNPGCluster(ctx context.Context, c client.Client, cnpgCluster *cnpgv1.Cluster) error { + logger := log.FromContext(ctx) + if cnpgCluster == nil { + logger.Info("CNPG Cluster not found, skipping deletion") + return nil + } + logger.Info("Deleting CNPG Cluster", "name", cnpgCluster.Name) + if err := c.Delete(ctx, cnpgCluster); err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete CNPG Cluster: %w", err) + } + return nil +} + +// handleFinalizer processes deletion cleanup: removes poolers, then deletes or orphans the CNPG Cluster +// based on ClusterDeletionPolicy, then removes the finalizer. +func handleFinalizer(ctx context.Context, c client.Client, scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, secret *corev1.Secret) error { + logger := log.FromContext(ctx) + if cluster.GetDeletionTimestamp() == nil { + logger.Info("PostgresCluster not marked for deletion, skipping finalizer logic") + return nil + } + if !controllerutil.ContainsFinalizer(cluster, PostgresClusterFinalizerName) { + logger.Info("Finalizer not present on PostgresCluster, skipping finalizer logic") + return nil + } + + cnpgCluster := &cnpgv1.Cluster{} + err := c.Get(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, cnpgCluster) + if err != nil { + if apierrors.IsNotFound(err) { + cnpgCluster = nil + logger.Info("CNPG cluster not found during cleanup") + } else { + return fmt.Errorf("failed to fetch CNPG cluster during cleanup: %w", err) + } + } + logger.Info("Processing finalizer cleanup for PostgresCluster") + + if err := deleteConnectionPoolers(ctx, c, cluster); err != nil { + logger.Error(err, "Failed to delete connection poolers during cleanup") + return fmt.Errorf("failed to delete connection poolers: %w", err) + } + + // Dereference *string — empty string falls through to default (unknown policy). 
+ policy := "" + if cluster.Spec.ClusterDeletionPolicy != nil { + policy = *cluster.Spec.ClusterDeletionPolicy + } + + switch policy { + case clusterDeletionPolicyDelete: + logger.Info("ClusterDeletionPolicy is 'Delete', deleting CNPG Cluster and associated resources") + if cnpgCluster != nil { + if err := deleteCNPGCluster(ctx, c, cnpgCluster); err != nil { + logger.Error(err, "Failed to delete CNPG Cluster during finalizer cleanup") + return fmt.Errorf("failed to delete CNPG Cluster during finalizer cleanup: %w", err) + } + } else { + logger.Info("CNPG Cluster not found, skipping deletion") + } + + case clusterDeletionPolicyRetain: + logger.Info("ClusterDeletionPolicy is 'Retain', removing owner references to orphan CNPG Cluster") + if cnpgCluster != nil { + originalCNPG := cnpgCluster.DeepCopy() + refRemoved, err := removeOwnerRef(scheme, cluster, cnpgCluster) + if err != nil { + return fmt.Errorf("failed to remove owner reference from CNPG cluster: %w", err) + } + if !refRemoved { + logger.Info("Owner reference already removed from CNPG Cluster, skipping patch") + } + if err := patchObject(ctx, c, originalCNPG, cnpgCluster, "CNPGCluster"); err != nil { + return fmt.Errorf("failed to patch CNPG cluster after removing owner reference: %w", err) + } + logger.Info("Removed owner reference from CNPG Cluster") + } + + // Remove owner reference from the superuser Secret to prevent cascading deletion. 
+ if cluster.Status.Resources != nil && cluster.Status.Resources.SuperUserSecretRef != nil { + secretName := cluster.Status.Resources.SuperUserSecretRef.Name + if err := c.Get(ctx, types.NamespacedName{Name: secretName, Namespace: cluster.Namespace}, secret); err != nil { + if !apierrors.IsNotFound(err) { + logger.Error(err, "Failed to fetch Secret during cleanup") + return fmt.Errorf("failed to fetch secret during cleanup: %w", err) + } + logger.Info("Secret not found, skipping owner reference removal", "secret", secretName) + } else { + originalSecret := secret.DeepCopy() + refRemoved, err := removeOwnerRef(scheme, cluster, secret) + if err != nil { + return fmt.Errorf("failed to remove owner reference from Secret: %w", err) + } + if refRemoved { + if err := patchObject(ctx, c, originalSecret, secret, "Secret"); err != nil { + return fmt.Errorf("failed to patch Secret after removing owner reference: %w", err) + } + } + logger.Info("Removed owner reference from Secret") + } + } + + default: + logger.Info("Unknown ClusterDeletionPolicy", "policy", policy) + } + + controllerutil.RemoveFinalizer(cluster, PostgresClusterFinalizerName) + if err := c.Update(ctx, cluster); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("PostgresCluster already deleted, skipping finalizer update") + return nil + } + logger.Error(err, "Failed to remove finalizer from PostgresCluster") + return fmt.Errorf("failed to remove finalizer: %w", err) + } + logger.Info("Finalizer removed, cleanup complete") + return nil +} + +func removeOwnerRef(scheme *runtime.Scheme, owner, obj client.Object) (bool, error) { + hasRef, err := controllerutil.HasOwnerReference(obj.GetOwnerReferences(), owner, scheme) + if err != nil { + return false, fmt.Errorf("failed to check owner reference: %w", err) + } + if !hasRef { + return false, nil + } + if err := controllerutil.RemoveOwnerReference(owner, obj, scheme); err != nil { + return false, fmt.Errorf("failed to remove owner reference: %w", err) + } + 
return true, nil +} + +// patchObject patches obj from original; treats NotFound as a no-op. +func patchObject(ctx context.Context, c client.Client, original, obj client.Object, kind objectKind) error { + logger := log.FromContext(ctx) + if err := c.Patch(ctx, obj, client.MergeFrom(original)); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("Object not found, skipping patch", "kind", kind, "name", obj.GetName()) + return nil + } + return fmt.Errorf("failed to patch %s object: %w", kind, err) + } + logger.Info("Patched object successfully", "kind", kind, "name", obj.GetName()) + return nil +} + +func generatePassword() (string, error) { + const ( + length = 32 + digits = 8 + symbols = 0 + ) + return password.Generate(length, digits, symbols, false, true) +} diff --git a/pkg/postgresql/cluster/core/types.go b/pkg/postgresql/cluster/core/types.go new file mode 100644 index 000000000..19886fd73 --- /dev/null +++ b/pkg/postgresql/cluster/core/types.go @@ -0,0 +1,102 @@ +package core + +import ( + "time" + + enterprisev4 "github.com/splunk/splunk-operator/api/v4" + corev1 "k8s.io/api/core/v1" +) + +// normalizedCNPGClusterSpec is a subset of cnpgv1.ClusterSpec fields used for drift detection. +// Only fields we set in buildCNPGClusterSpec are included — CNPG-injected defaults are excluded +// to avoid false-positive drift on every reconcile. +type normalizedCNPGClusterSpec struct { + ImageName string + Instances int + CustomDefinedParameters map[string]string + PgHBA []string + DefaultDatabase string + Owner string + StorageSize string + Resources corev1.ResourceRequirements +} + +// MergedConfig is the resolved configuration after overlaying PostgresCluster on PostgresClusterClass defaults. 
type MergedConfig struct {
	// Spec is the effective PostgresCluster spec after class defaults are applied.
	Spec *enterprisev4.PostgresClusterSpec
	// CNPG carries the provisioner-specific policies taken from the class.
	CNPG *enterprisev4.CNPGConfig
}

// Distinct string types keep phases, condition types, reasons and object kinds
// from being accidentally interchanged at call sites.
type reconcileClusterPhases string
type conditionTypes string
type conditionReasons string
type objectKind string

const (
	// retryDelay is the requeue interval while waiting on external state.
	retryDelay = time.Second * 15

	// Pooler endpoint discriminators; also used as the CNPG PoolerType value.
	readOnlyEndpoint  string = "ro"
	readWriteEndpoint string = "rw"

	defaultDatabaseName string = "postgres"
	superUsername       string = "postgres"
	defaultPort         string = "5432"

	// Key within the superuser Secret holding the password value.
	secretKeyPassword      string = "password"
	defaultSecretSuffix    string = "-secret"
	defaultPoolerSuffix    string = "-pooler-"
	defaultConfigMapSuffix string = "-configmap"

	// Allowed ClusterDeletionPolicy values (see the CRD enum).
	clusterDeletionPolicyDelete string = "Delete"
	clusterDeletionPolicyRetain string = "Retain"

	// PostgresClusterFinalizerName is exported so the primary adapter (controller) can
	// reference it in event predicates without duplicating the string.
	PostgresClusterFinalizerName string = "postgresclusters.enterprise.splunk.com/finalizer"

	// cluster phases
	readyClusterPhase        reconcileClusterPhases = "Ready"
	pendingClusterPhase      reconcileClusterPhases = "Pending"
	provisioningClusterPhase reconcileClusterPhases = "Provisioning"
	configuringClusterPhase  reconcileClusterPhases = "Configuring"
	failedClusterPhase       reconcileClusterPhases = "Failed"

	// condition types
	clusterReady conditionTypes = "ClusterReady"
	poolerReady  conditionTypes = "PoolerReady"

	// condition reasons — clusterReady
	reasonClusterClassNotFound  conditionReasons = "ClusterClassNotFound"
	reasonManagedRolesFailed    conditionReasons = "ManagedRolesReconciliationFailed"
	reasonClusterBuildFailed    conditionReasons = "ClusterBuildFailed"
	reasonClusterBuildSucceeded conditionReasons = "ClusterBuildSucceeded"
	reasonClusterGetFailed      conditionReasons = "ClusterGetFailed"
	reasonClusterPatchFailed    conditionReasons = "ClusterPatchFailed"
	reasonInvalidConfiguration  conditionReasons = "InvalidConfiguration"
	reasonConfigMapFailed       conditionReasons = "ConfigMapReconciliationFailed"
	reasonUserSecretFailed      conditionReasons = "UserSecretReconciliationFailed"
	reasonSuperUserSecretFailed conditionReasons = "SuperUserSecretFailed"
	reasonClusterDeleteFailed   conditionReasons = "ClusterDeleteFailed"

	// condition reasons — poolerReady
	reasonPoolerReconciliationFailed conditionReasons = "PoolerReconciliationFailed"
	reasonPoolerConfigMissing        conditionReasons = "PoolerConfigMissing"
	reasonPoolerCreating             conditionReasons = "PoolerCreating"
	reasonAllInstancesReady          conditionReasons = "AllInstancesReady"

	// condition reasons — CNPG cluster phase mapping
	reasonCNPGClusterNotHealthy  conditionReasons = "CNPGClusterNotHealthy"
	reasonCNPGClusterHealthy     conditionReasons = "CNPGClusterHealthy"
	reasonCNPGProvisioning       conditionReasons = "CNPGClusterProvisioning"
	reasonCNPGSwitchover         conditionReasons = "CNPGSwitchover"
	reasonCNPGFailingOver        conditionReasons = "CNPGFailingOver"
	reasonCNPGRestarting         conditionReasons = "CNPGRestarting"
	reasonCNPGUpgrading          conditionReasons = "CNPGUpgrading"
	reasonCNPGApplyingConfig     conditionReasons = "CNPGApplyingConfiguration"
	reasonCNPGPromoting          conditionReasons = "CNPGPromoting"
	reasonCNPGWaitingForUser     conditionReasons = "CNPGWaitingForUser"
	reasonCNPGUnrecoverable      conditionReasons = "CNPGUnrecoverable"
	reasonCNPGProvisioningFailed conditionReasons = "CNPGProvisioningFailed"
	reasonCNPGPluginError        conditionReasons = "CNPGPluginError"
	reasonCNPGImageError         conditionReasons = "CNPGImageError"
)
diff --git a/pkg/postgresql/database/adapter/db_repository.go b/pkg/postgresql/database/adapter/db_repository.go
new file mode 100644
index 000000000..0b23f685c
--- /dev/null
+++ b/pkg/postgresql/database/adapter/db_repository.go
@@ -0,0 +1,80 @@
// Package adapter contains driven adapters for the PostgresDatabase domain.
// Each adapter implements a port defined in core/ports.go.
+package adapter + +import ( + "context" + "fmt" + "time" + + dbcore "github.com/splunk/splunk-operator/pkg/postgresql/database/core" + + "github.com/jackc/pgx/v5" +) + +const ( + superUsername = "postgres" + postgresPort = "5432" + dbConnectTimeout = 10 * time.Second +) + +// pgDBRepository is the pgx-backed adapter for the core.DBRepo port. +// It owns the full connection lifecycle: open on construction, close on ExecGrants return. +type pgDBRepository struct { + conn *pgx.Conn +} + +// ExecGrants applies all privilege grants needed for the RW role on a single database. +// GRANT ON ALL TABLES/SEQUENCES covers existing objects; ALTER DEFAULT PRIVILEGES covers +// future ones created by the admin role (e.g. via migrations). +func (r *pgDBRepository) ExecGrants(ctx context.Context, dbName string) error { + defer r.conn.Close(context.Background()) + + adminRole := dbName + "_admin" + rwRole := dbName + "_rw" + + tx, err := r.conn.Begin(ctx) + if err != nil { + return fmt.Errorf("beginning transaction: %w", err) + } + + // Identifiers cannot be parameterised in PostgreSQL — fmt.Sprintf is correct here. + // Role names are generated internally by our own functions, never from user input. 
+ stmts := []string{ + fmt.Sprintf("GRANT CONNECT ON DATABASE %s TO %s", dbName, rwRole), + fmt.Sprintf("GRANT USAGE ON SCHEMA public TO %s", rwRole), + fmt.Sprintf("GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO %s", rwRole), + fmt.Sprintf("GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO %s", rwRole), + fmt.Sprintf("ALTER DEFAULT PRIVILEGES FOR ROLE %s IN SCHEMA public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO %s", adminRole, rwRole), + fmt.Sprintf("ALTER DEFAULT PRIVILEGES FOR ROLE %s IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO %s", adminRole, rwRole), + } + + for _, stmt := range stmts { + if _, err := tx.Exec(ctx, stmt); err != nil { + return fmt.Errorf("executing grant %q: %w", stmt, err) + } + } + + return tx.Commit(ctx) +} + +// NewDBRepository opens a direct superuser connection, bypassing any pooler. +// PgBouncer in transaction mode blocks DDL; password is set on the config +// struct to avoid URL-encoding issues with special characters. 
+func NewDBRepository(ctx context.Context, host, dbName, password string) (dbcore.DBRepo, error) { + cfg, err := pgx.ParseConfig(fmt.Sprintf( + "postgres://%s@%s:%s/%s?sslmode=require&connect_timeout=%d", + superUsername, host, postgresPort, dbName, + int(dbConnectTimeout.Seconds()), + )) + if err != nil { + return nil, fmt.Errorf("parsing connection config for %s/%s: %w", host, dbName, err) + } + cfg.Password = password + + conn, err := pgx.ConnectConfig(ctx, cfg) + if err != nil { + return nil, fmt.Errorf("connecting to %s/%s: %w", host, dbName, err) + } + return &pgDBRepository{conn: conn}, nil +} diff --git a/pkg/postgresql/database/core/database.go b/pkg/postgresql/database/core/database.go new file mode 100644 index 000000000..1ae2227d7 --- /dev/null +++ b/pkg/postgresql/database/core/database.go @@ -0,0 +1,941 @@ +package core + +import ( + "context" + "encoding/json" + stderrors "errors" + "fmt" + "slices" + "strings" + + cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + "github.com/sethvargo/go-password/password" + enterprisev4 "github.com/splunk/splunk-operator/api/v4" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// NewDBRepoFunc constructs a DBRepo adapter for the given host and database. +// Injected by the controller so the core never imports the pgx adapter directly. +type NewDBRepoFunc func(ctx context.Context, host, dbName, password string) (DBRepo, error) + +// PostgresDatabaseService is the application service entry point called by the primary adapter (reconciler). 
+// newDBRepo is injected to keep the core free of pgx imports. +func PostgresDatabaseService( + ctx context.Context, + c client.Client, + scheme *runtime.Scheme, + postgresDB *enterprisev4.PostgresDatabase, + newDBRepo NewDBRepoFunc, +) (ctrl.Result, error) { + logger := log.FromContext(ctx) + logger.Info("Reconciling PostgresDatabase", "name", postgresDB.Name, "namespace", postgresDB.Namespace) + + updateStatus := func(conditionType conditionTypes, conditionStatus metav1.ConditionStatus, reason conditionReasons, message string, phase reconcileDBPhases) error { + return setStatus(ctx, c, postgresDB, conditionType, conditionStatus, reason, message, phase) + } + + // Finalizer: cleanup on deletion, register on creation. + if postgresDB.GetDeletionTimestamp() != nil { + if err := handleDeletion(ctx, c, postgresDB); err != nil { + logger.Error(err, "Cleanup failed for PostgresDatabase") + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + if !controllerutil.ContainsFinalizer(postgresDB, postgresDatabaseFinalizerName) { + controllerutil.AddFinalizer(postgresDB, postgresDatabaseFinalizerName) + if err := c.Update(ctx, postgresDB); err != nil { + logger.Error(err, "Failed to add finalizer to PostgresDatabase") + return ctrl.Result{}, err + } + return ctrl.Result{}, nil + } + + // ObservedGeneration equality means all phases completed on the current spec — nothing to do. 
+	if postgresDB.Status.ObservedGeneration != nil && *postgresDB.Status.ObservedGeneration == postgresDB.Generation {
+		logger.Info("Spec unchanged and all phases complete, skipping")
+		return ctrl.Result{}, nil
+	}
+
+	// Phase: ClusterValidation
+	cluster, clusterStatus, err := ensureClusterReady(ctx, c, postgresDB)
+	if err != nil {
+		if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterInfoFetchFailed,
+			"Can't reach Cluster CR due to transient errors", pendingDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		return ctrl.Result{}, err
+	}
+	logger.Info("Cluster validation done", "clusterName", postgresDB.Spec.ClusterRef.Name, "status", clusterStatus)
+
+	switch clusterStatus {
+	case ClusterNotFound:
+		if err := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterNotFound, "Cluster CR not found", pendingDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: clusterNotFoundRetryDelay}, nil
+
+	case ClusterNotReady, ClusterNoProvisionerRef:
+		if err := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterProvisioning, "Cluster is not in ready state yet", pendingDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: retryDelay}, nil
+
+	case ClusterReady:
+		if err := updateStatus(clusterReady, metav1.ConditionTrue, reasonClusterAvailable, "Cluster is operational", provisioningDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+	}
+
+	// Phase: RoleConflictCheck — verify no other SSA field manager already owns our roles.
+	// NOTE(review): getRoleConflicts iterates a map, so the conflict list order is
+	// nondeterministic — conflictMsg may differ between reconciles for the same
+	// conflict set, causing the condition message (and a status write) to flap.
+	// Sorting the conflicts before joining would make the message stable.
+	roleConflicts := getRoleConflicts(postgresDB, cluster)
+	if len(roleConflicts) > 0 {
+		conflictMsg := fmt.Sprintf("Role conflict: %s. "+
+			"If you deleted a previous PostgresDatabase, recreate it with the original name to re-adopt the orphaned resources.",
+			strings.Join(roleConflicts, ", "))
+		logger.Error(nil, conflictMsg)
+		if statusErr := updateStatus(rolesReady, metav1.ConditionFalse, reasonRoleConflict, conflictMsg, failedDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		// Terminal for this spec: no requeue — a human must resolve the conflict.
+		return ctrl.Result{}, nil
+	}
+
+	// We need the CNPG Cluster directly because PostgresCluster status does not yet
+	// surface managed role reconciliation state.
+	cnpgCluster := &cnpgv1.Cluster{}
+	if err := c.Get(ctx, types.NamespacedName{
+		Name:      cluster.Status.ProvisionerRef.Name,
+		Namespace: cluster.Status.ProvisionerRef.Namespace,
+	}, cnpgCluster); err != nil {
+		logger.Error(err, "Failed to fetch CNPG Cluster")
+		return ctrl.Result{}, err
+	}
+
+	// Phase: CredentialProvisioning — secrets must exist before roles are patched.
+	// CNPG rejects a PasswordSecretRef pointing at a missing secret.
+	if err := reconcileUserSecrets(ctx, c, scheme, postgresDB); err != nil {
+		if statusErr := updateStatus(secretsReady, metav1.ConditionFalse, reasonSecretsCreationFailed,
+			fmt.Sprintf("Failed to reconcile user secrets: %v", err), provisioningDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		return ctrl.Result{}, err
+	}
+	if err := updateStatus(secretsReady, metav1.ConditionTrue, reasonSecretsCreated,
+		fmt.Sprintf("All secrets provisioned for %d databases", len(postgresDB.Spec.Databases)), provisioningDBPhase); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Phase: ConnectionMetadata — ConfigMaps carry connection info consumers need as soon
+	// as databases are ready, so they are created alongside secrets.
+	endpoints := resolveClusterEndpoints(cluster, cnpgCluster, postgresDB.Namespace)
+	if err := reconcileRoleConfigMaps(ctx, c, scheme, postgresDB, endpoints); err != nil {
+		if statusErr := updateStatus(configMapsReady, metav1.ConditionFalse, reasonConfigMapsCreationFailed,
+			fmt.Sprintf("Failed to reconcile ConfigMaps: %v", err), provisioningDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		return ctrl.Result{}, err
+	}
+	if err := updateStatus(configMapsReady, metav1.ConditionTrue, reasonConfigMapsCreated,
+		fmt.Sprintf("All ConfigMaps provisioned for %d databases", len(postgresDB.Spec.Databases)), provisioningDBPhase); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Phase: RoleProvisioning
+	// NOTE(review): drift detection compares role NAMES only. If a role exists in
+	// the cluster spec but its PasswordSecretRef/Exists fields have drifted, the
+	// patch below is skipped. Confirm whether field drift is possible here, or
+	// compare the full ManagedRole structs instead.
+	desiredUsers := getDesiredUsers(postgresDB)
+	actualRoles := getUsersInClusterSpec(cluster)
+	var missing []string
+	for _, role := range desiredUsers {
+		if !slices.Contains(actualRoles, role) {
+			missing = append(missing, role)
+		}
+	}
+
+	if len(missing) > 0 {
+		logger.Info("User spec changed, patching CNPG Cluster", "missing", missing)
+		if err := patchManagedRoles(ctx, c, postgresDB, cluster); err != nil {
+			logger.Error(err, "Failed to patch users in CNPG Cluster")
+			return ctrl.Result{}, err
+		}
+		if err := updateStatus(rolesReady, metav1.ConditionFalse, reasonWaitingForCNPG,
+			fmt.Sprintf("Waiting for %d roles to be reconciled", len(desiredUsers)), provisioningDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: retryDelay}, nil
+	}
+
+	notReadyRoles, err := verifyRolesReady(ctx, desiredUsers, cnpgCluster)
+	if err != nil {
+		if statusErr := updateStatus(rolesReady, metav1.ConditionFalse, reasonUsersCreationFailed,
+			fmt.Sprintf("Role creation failed: %v", err), failedDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		return ctrl.Result{}, err
+	}
+	if len(notReadyRoles) > 0 {
+		if err := updateStatus(rolesReady, metav1.ConditionFalse, reasonWaitingForCNPG,
+			fmt.Sprintf("Waiting for roles to be reconciled: %v", notReadyRoles), provisioningDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: retryDelay}, nil
+	}
+	if err := updateStatus(rolesReady, metav1.ConditionTrue, reasonUsersAvailable,
+		fmt.Sprintf("All %d users in PostgreSQL", len(desiredUsers)), provisioningDBPhase); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Phase: DatabaseProvisioning
+	if err := reconcileCNPGDatabases(ctx, c, scheme, postgresDB, cluster); err != nil {
+		logger.Error(err, "Failed to reconcile CNPG Databases")
+		return ctrl.Result{}, err
+	}
+
+	notReadyDBs, err := verifyDatabasesReady(ctx, c, postgresDB)
+	if err != nil {
+		logger.Error(err, "Failed to verify database status")
+		return ctrl.Result{}, err
+	}
+	if len(notReadyDBs) > 0 {
+		if err := updateStatus(databasesReady, metav1.ConditionFalse, reasonWaitingForCNPG,
+			fmt.Sprintf("Waiting for databases to be ready: %v", notReadyDBs), provisioningDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: retryDelay}, nil
+	}
+	if err := updateStatus(databasesReady, metav1.ConditionTrue, reasonDatabasesAvailable,
+		fmt.Sprintf("All %d databases ready", len(postgresDB.Spec.Databases)), readyDBPhase); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Phase: RWRolePrivileges
+	// Skipped when no new databases are detected — ALTER DEFAULT PRIVILEGES covers tables
+	// added by migrations on existing databases. Re-runs for all databases when a new one
+	// is added (idempotent for existing ones, required for the new one).
+	if hasNewDatabases(postgresDB) {
+		// Read from our own status — we created this secret and wrote the SecretKeySelector
+		// (name + key) when the cluster was provisioned. This avoids depending on CNPG's
+		// spec field and makes the key explicit.
+		if cluster.Status.Resources == nil || cluster.Status.Resources.SuperUserSecretRef == nil {
+			return ctrl.Result{}, fmt.Errorf("PostgresCluster %s has no superuser secret ref in status", cluster.Name)
+		}
+		superSecretRef := cluster.Status.Resources.SuperUserSecretRef
+		superSecret := &corev1.Secret{}
+		if err := c.Get(ctx, types.NamespacedName{
+			Name:      superSecretRef.Name,
+			Namespace: postgresDB.Namespace,
+		}, superSecret); err != nil {
+			return ctrl.Result{}, fmt.Errorf("fetching superuser secret %s: %w", superSecretRef.Name, err)
+		}
+		pw, ok := superSecret.Data[superSecretRef.Key]
+		if !ok {
+			return ctrl.Result{}, fmt.Errorf("superuser secret %s missing %q key", superSecretRef.Name, superSecretRef.Key)
+		}
+
+		dbNames := make([]string, 0, len(postgresDB.Spec.Databases))
+		for _, dbSpec := range postgresDB.Spec.Databases {
+			dbNames = append(dbNames, dbSpec.Name)
+		}
+
+		if err := reconcileRWRolePrivileges(ctx, endpoints.RWHost, string(pw), dbNames, newDBRepo); err != nil {
+			if statusErr := updateStatus(privilegesReady, metav1.ConditionFalse, reasonPrivilegesGrantFailed,
+				fmt.Sprintf("Failed to grant RW role privileges: %v", err), provisioningDBPhase); statusErr != nil {
+				logger.Error(statusErr, "Failed to update status")
+			}
+			return ctrl.Result{}, err
+		}
+		if err := updateStatus(privilegesReady, metav1.ConditionTrue, reasonPrivilegesGranted,
+			fmt.Sprintf("RW role privileges granted for all %d databases", len(postgresDB.Spec.Databases)), readyDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+	}
+
+	postgresDB.Status.Databases = populateDatabaseStatus(postgresDB)
+	postgresDB.Status.ObservedGeneration = &postgresDB.Generation
+
+	if err := c.Status().Update(ctx, postgresDB); err != nil {
+		// Conflicts mean a concurrent writer bumped resourceVersion; requeue
+		// and recompute from a fresh read rather than failing the reconcile.
+		if errors.IsConflict(err) {
+			return ctrl.Result{Requeue: true}, nil
+		}
+		return ctrl.Result{}, fmt.Errorf("persisting final status: %w", err)
+	}
+
+	logger.Info("All phases complete")
+	return ctrl.Result{}, nil
+}
+
+// reconcileRWRolePrivileges calls the DBRepo port for each database.
+// Errors are collected so all databases are attempted before returning.
+// NOTE(review): the DBRepo returned by newDBRepo wraps a live PostgreSQL
+// connection, and nothing here closes it — each reconcile of a new database
+// appears to leak one connection per dbName. If the DBRepo port exposes a
+// Close method, defer it after a successful newDBRepo; confirm against
+// ports.go.
+func reconcileRWRolePrivileges(
+	ctx context.Context,
+	rwHost, superPassword string,
+	dbNames []string,
+	newDBRepo NewDBRepoFunc,
+) error {
+	logger := log.FromContext(ctx)
+	var errs []error
+	for _, dbName := range dbNames {
+		repo, err := newDBRepo(ctx, rwHost, dbName, superPassword)
+		if err != nil {
+			logger.Error(err, "Failed to connect to database", "database", dbName)
+			errs = append(errs, fmt.Errorf("database %s: %w", dbName, err))
+			continue
+		}
+		if err := repo.ExecGrants(ctx, dbName); err != nil {
+			logger.Error(err, "Failed to grant RW role privileges", "database", dbName)
+			errs = append(errs, fmt.Errorf("database %s: %w", dbName, err))
+			continue
+		}
+		logger.Info("RW role privileges granted", "database", dbName, "rwRole", rwRoleName(dbName))
+	}
+	return stderrors.Join(errs...)
+}
+
+// ensureClusterReady fetches the referenced PostgresCluster and classifies its
+// readiness. Returns (nil, ClusterNotFound, nil) when the CR does not exist.
+func ensureClusterReady(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase) (*enterprisev4.PostgresCluster, clusterReadyStatus, error) {
+	logger := log.FromContext(ctx)
+	cluster := &enterprisev4.PostgresCluster{}
+	if err := c.Get(ctx, types.NamespacedName{Name: postgresDB.Spec.ClusterRef.Name, Namespace: postgresDB.Namespace}, cluster); err != nil {
+		if errors.IsNotFound(err) {
+			return nil, ClusterNotFound, nil
+		}
+		logger.Error(err, "Failed to fetch Cluster", "name", postgresDB.Spec.ClusterRef.Name)
+		return nil, ClusterNotReady, err
+	}
+	// NOTE(review): this compares the status phase string against
+	// string(ClusterReady), where ClusterReady is a clusterReadyStatus constant.
+	// That only works if the constant's value equals the phase value "Ready"
+	// documented on PostgresClusterStatus.Phase — confirm, or compare against a
+	// dedicated phase constant to avoid coupling two unrelated enums.
+	if cluster.Status.Phase == nil || *cluster.Status.Phase != string(ClusterReady) {
+		return cluster, ClusterNotReady, nil
+	}
+	if cluster.Status.ProvisionerRef == nil {
+		return cluster, ClusterNoProvisionerRef, nil
+	}
+	return cluster, ClusterReady, nil
+}
+
+// getDesiredUsers returns the admin and rw role names for every database in spec.
+func getDesiredUsers(postgresDB *enterprisev4.PostgresDatabase) []string {
+	users := make([]string, 0, len(postgresDB.Spec.Databases)*2)
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		users = append(users, adminRoleName(dbSpec.Name), rwRoleName(dbSpec.Name))
+	}
+	return users
+}
+
+// getUsersInClusterSpec lists role names currently present in the cluster spec.
+func getUsersInClusterSpec(cluster *enterprisev4.PostgresCluster) []string {
+	users := make([]string, 0, len(cluster.Spec.ManagedRoles))
+	for _, role := range cluster.Spec.ManagedRoles {
+		users = append(users, role.Name)
+	}
+	return users
+}
+
+// getRoleConflicts returns the desired roles already owned by a different SSA
+// field manager. NOTE(review): output order follows map iteration and is
+// therefore nondeterministic — sort before embedding in status messages.
+func getRoleConflicts(postgresDB *enterprisev4.PostgresDatabase, cluster *enterprisev4.PostgresCluster) []string {
+	myManager := fieldManagerName(postgresDB.Name)
+	desired := make(map[string]struct{}, len(postgresDB.Spec.Databases)*2)
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		desired[adminRoleName(dbSpec.Name)] = struct{}{}
+		desired[rwRoleName(dbSpec.Name)] = struct{}{}
+	}
+	roleOwners := managedRoleOwners(cluster.ManagedFields)
+	var conflicts []string
+	for roleName := range desired {
+		if owner, exists := roleOwners[roleName]; exists && owner != myManager {
+			conflicts = append(conflicts, fmt.Sprintf("%s (owned by %s)", roleName, owner))
+		}
+	}
+	return conflicts
+}
+
+// managedRoleOwners maps each managed role name found in managedFields to the
+// field manager that owns it. If multiple managers touch the same role, the
+// last entry scanned wins.
+func managedRoleOwners(managedFields []metav1.ManagedFieldsEntry) map[string]string {
+	owners := make(map[string]string)
+	for _, mf := range managedFields {
+		if mf.FieldsV1 == nil {
+			continue
+		}
+		for _, name := range parseRoleNames(mf.FieldsV1.Raw) {
+			owners[name] = mf.Manager
+		}
+	}
+	return owners
+}
+
+// parseRoleNames extracts role names from a FieldsV1 JSON blob by walking
+// f:spec -> f:managedRoles and decoding the listMap keys (`k:{"name":...}`).
+// NOTE(review): this hand-parses the managedFields key encoding; it silently
+// returns nil on any shape mismatch, which is the safe failure mode here
+// (no ownership claimed means no conflict reported).
+func parseRoleNames(raw []byte) []string {
+	var fields map[string]any
+	if err := json.Unmarshal(raw, &fields); err != nil {
+		return nil
+	}
+	spec, _ := fields["f:spec"].(map[string]any)
+	roles, _ := spec["f:managedRoles"].(map[string]any)
+	var names []string
+	for key := range roles {
+		var k struct{ Name string }
+		if err := json.Unmarshal([]byte(strings.TrimPrefix(key, "k:")), &k); err == nil && k.Name != "" {
+			names = append(names, k.Name)
+		}
+	}
+	return names
+}
+
+// patchManagedRoles server-side-applies this PostgresDatabase's admin/rw roles
+// onto the PostgresCluster under a per-CR field manager.
+// NOTE(review): two risks to confirm here:
+//  1. cluster.APIVersion/Kind are read from an object fetched via the typed
+//     client, which typically leaves TypeMeta empty — if so, the apply patch
+//     is built without a GVK and will be rejected. Consider setting the GVK
+//     explicitly from the scheme.
+//  2. The unstructured Object embeds a typed []enterprisev4.ManagedRole;
+//     unstructured content is expected to be JSON-compatible values only, and
+//     DeepCopyJSON panics on unrecognized types. Converting the roles with
+//     runtime.DefaultUnstructuredConverter.ToUnstructured would be safer.
+func patchManagedRoles(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, cluster *enterprisev4.PostgresCluster) error {
+	logger := log.FromContext(ctx)
+	allRoles := make([]enterprisev4.ManagedRole, 0, len(postgresDB.Spec.Databases)*2)
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		allRoles = append(allRoles,
+			enterprisev4.ManagedRole{
+				Name:   adminRoleName(dbSpec.Name),
+				Exists: true,
+				PasswordSecretRef: &corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleAdmin)},
+					Key: secretKeyPassword},
+			},
+			enterprisev4.ManagedRole{
+				Name:   rwRoleName(dbSpec.Name),
+				Exists: true,
+				PasswordSecretRef: &corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleRW)},
+					Key: secretKeyPassword},
+			})
+	}
+	rolePatch := &unstructured.Unstructured{
+		Object: map[string]any{
+			"apiVersion": cluster.APIVersion,
+			"kind":       cluster.Kind,
+			"metadata":   map[string]any{"name": cluster.Name, "namespace": cluster.Namespace},
+			"spec":       map[string]any{"managedRoles": allRoles},
+		},
+	}
+	fieldManager := fieldManagerName(postgresDB.Name)
+	if err := c.Patch(ctx, rolePatch, client.Apply, client.FieldOwner(fieldManager)); err != nil {
+		logger.Error(err, "Failed to add users to PostgresCluster", "postgresDatabase", postgresDB.Name)
+		return fmt.Errorf("patching managed roles for PostgresDatabase %s: %w", postgresDB.Name, err)
+	}
+	logger.Info("Users added to PostgresCluster via SSA", "postgresDatabase", postgresDB.Name, "roleCount", len(allRoles))
+	return nil
+}
+
+// verifyRolesReady checks CNPG's managed-roles status: returns an error for any
+// expected user in CannotReconcile, otherwise the list of users not yet in the
+// Reconciled bucket (empty slice means all ready).
+func verifyRolesReady(ctx context.Context, expectedUsers []string, cnpgCluster *cnpgv1.Cluster) ([]string, error) {
+	logger := log.FromContext(ctx)
+	if cnpgCluster.Status.ManagedRolesStatus.CannotReconcile != nil {
+		for _, userName := range expectedUsers {
+			if errs, exists := cnpgCluster.Status.ManagedRolesStatus.CannotReconcile[userName]; exists {
+				return nil, fmt.Errorf("user %s reconciliation failed: %v", userName, errs)
+			}
+		}
+	}
+	reconciled := cnpgCluster.Status.ManagedRolesStatus.ByStatus[cnpgv1.RoleStatusReconciled]
+	var notReady []string
+	for _, userName := range expectedUsers {
+		if !slices.Contains(reconciled, userName) {
+			notReady = append(notReady, userName)
+		}
+	}
+	if len(notReady) > 0 {
+		logger.Info("Users not reconciled yet", "pending", notReady)
+	}
+	return notReady, nil
+}
+
+// reconcileCNPGDatabases creates or updates one CNPG Database CR per entry in
+// spec.Databases, owned by this PostgresDatabase. An orphan previously retained
+// by a same-named CR is re-adopted (annotation cleared, owner ref restored).
+func reconcileCNPGDatabases(ctx context.Context, c client.Client, scheme *runtime.Scheme, postgresDB *enterprisev4.PostgresDatabase, cluster *enterprisev4.PostgresCluster) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		cnpgDBName := cnpgDatabaseName(postgresDB.Name, dbSpec.Name)
+		reclaimPolicy := cnpgv1.DatabaseReclaimDelete
+		if dbSpec.DeletionPolicy == deletionPolicyRetain {
+			reclaimPolicy = cnpgv1.DatabaseReclaimRetain
+		}
+		cnpgDB := &cnpgv1.Database{
+			ObjectMeta: metav1.ObjectMeta{Name: cnpgDBName, Namespace: postgresDB.Namespace},
+		}
+		_, err := controllerutil.CreateOrUpdate(ctx, c, cnpgDB, func() error {
+			cnpgDB.Spec = cnpgv1.DatabaseSpec{
+				Name:          dbSpec.Name,
+				Owner:         adminRoleName(dbSpec.Name),
+				ClusterRef:    corev1.LocalObjectReference{Name: cluster.Status.ProvisionerRef.Name},
+				ReclaimPolicy: reclaimPolicy,
+			}
+			reAdopting := cnpgDB.Annotations[annotationRetainedFrom] == postgresDB.Name
+			if reAdopting {
+				logger.Info("Re-adopting orphaned CNPG Database", "name", cnpgDBName)
+				delete(cnpgDB.Annotations, annotationRetainedFrom)
+			}
+			if cnpgDB.CreationTimestamp.IsZero() || reAdopting {
+				return controllerutil.SetControllerReference(postgresDB, cnpgDB, scheme)
+			}
+			return nil
+		})
+		if err != nil {
+			return fmt.Errorf("reconciling CNPG Database %s: %w", cnpgDBName, err)
+		}
+	}
+	return nil
+}
+
+// verifyDatabasesReady returns the spec database names whose CNPG Database CR
+// has not yet reported Applied=true.
+func verifyDatabasesReady(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase) ([]string, error) {
+	var notReady []string
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		cnpgDBName := cnpgDatabaseName(postgresDB.Name, dbSpec.Name)
+		cnpgDB := &cnpgv1.Database{}
+		if err := c.Get(ctx, types.NamespacedName{Name: cnpgDBName, Namespace: postgresDB.Namespace}, cnpgDB); err != nil {
+			return nil, fmt.Errorf("getting CNPG Database %s: %w", cnpgDBName, err)
+		}
+		if cnpgDB.Status.Applied == nil || !*cnpgDB.Status.Applied {
+			notReady = append(notReady, dbSpec.Name)
+		}
+	}
+	return notReady, nil
+}
+
+// setStatus records one condition plus the overall phase and immediately writes
+// status to the API server. NOTE(review): callers invoke this once per phase,
+// so a reconcile pass can issue several status writes; a conflict here aborts
+// the pass with an error rather than a requeue — confirm that is intended.
+func setStatus(ctx context.Context, c client.Client, db *enterprisev4.PostgresDatabase, conditionType conditionTypes, conditionStatus metav1.ConditionStatus, reason conditionReasons, message string, phase reconcileDBPhases) error {
+	meta.SetStatusCondition(&db.Status.Conditions, metav1.Condition{
+		Type:               string(conditionType),
+		Status:             conditionStatus,
+		Reason:             string(reason),
+		Message:            message,
+		ObservedGeneration: db.Generation,
+	})
+	p := string(phase)
+	db.Status.Phase = &p
+	return c.Status().Update(ctx, db)
+}
+
+// buildDeletionPlan splits spec databases by deletion policy: Retain -> orphan
+// the backing resources, anything else -> delete them.
+func buildDeletionPlan(databases []enterprisev4.DatabaseDefinition) deletionPlan {
+	var plan deletionPlan
+	for _, db := range databases {
+		if db.DeletionPolicy == deletionPolicyRetain {
+			plan.retained = append(plan.retained, db)
+		} else {
+			plan.deleted = append(plan.deleted, db)
+		}
+	}
+	return plan
+}
+
+// handleDeletion is the finalizer body: orphan retained resources, delete the
+// rest, trim managed roles on the cluster, then remove the finalizer.
+func handleDeletion(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase) error {
+	plan := buildDeletionPlan(postgresDB.Spec.Databases)
+	if err := orphanRetainedResources(ctx, c, postgresDB, plan.retained); err != nil {
+		return err
+	}
+	if err := deleteRemovedResources(ctx, c, postgresDB, plan.deleted); err != nil {
+		return err
+	}
+	if err := cleanupManagedRoles(ctx, c, postgresDB, plan); err != nil {
+		return err
+	}
+	controllerutil.RemoveFinalizer(postgresDB, postgresDatabaseFinalizerName)
+	if err := c.Update(ctx, postgresDB); err != nil {
+		// NotFound here means the object is already gone — cleanup succeeded.
+		if errors.IsNotFound(err) {
+			return nil
+		}
+		return fmt.Errorf("removing finalizer: %w", err)
+	}
+	log.FromContext(ctx).Info("Cleanup complete", "name", postgresDB.Name, "retained", len(plan.retained), "deleted", len(plan.deleted))
+	return nil
+}
+
+// orphanRetainedResources detaches every retained database's CNPG Database CR,
+// ConfigMap, and Secrets from this owner so they survive deletion.
+func orphanRetainedResources(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, retained []enterprisev4.DatabaseDefinition) error {
+	if err := orphanCNPGDatabases(ctx, c, postgresDB, retained); err != nil {
+		return err
+	}
+	if err := orphanConfigMaps(ctx, c, postgresDB, retained); err != nil {
+		return err
+	}
+	return orphanSecrets(ctx, c, postgresDB, retained)
+}
+
+// deleteRemovedResources deletes the CNPG Database CRs, ConfigMaps, and Secrets
+// for databases whose policy is not Retain.
+func deleteRemovedResources(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, deleted []enterprisev4.DatabaseDefinition) error {
+	if err := deleteCNPGDatabases(ctx, c, postgresDB, deleted); err != nil {
+		return err
+	}
+	if err := deleteConfigMaps(ctx, c, postgresDB, deleted); err != nil {
+		return err
+	}
+	return deleteSecrets(ctx, c, postgresDB, deleted)
+}
+
+// cleanupManagedRoles re-applies only the retained databases' roles, dropping
+// ownership of the deleted ones. Skipped entirely when nothing is deleted.
+func cleanupManagedRoles(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, plan deletionPlan) error {
+	if len(plan.deleted) == 0 {
+		return nil
+	}
+	cluster := &enterprisev4.PostgresCluster{}
+	if err := c.Get(ctx, types.NamespacedName{Name: postgresDB.Spec.ClusterRef.Name, Namespace: postgresDB.Namespace}, cluster); err != nil {
+		if !errors.IsNotFound(err) {
+			return fmt.Errorf("getting PostgresCluster for role cleanup: %w", err)
+		}
+		log.FromContext(ctx).Info("PostgresCluster already deleted, skipping role cleanup")
+		return nil
+	}
+	return patchManagedRolesOnDeletion(ctx, c, postgresDB, cluster, plan.retained)
+}
+
+// orphanCNPGDatabases removes this CR's owner reference from retained CNPG
+// Database CRs and stamps them with the retained-from annotation so a
+// same-named successor can re-adopt them. Idempotent: already-annotated
+// objects are skipped.
+func orphanCNPGDatabases(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, databases []enterprisev4.DatabaseDefinition) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range databases {
+		name := cnpgDatabaseName(postgresDB.Name, dbSpec.Name)
+		db := &cnpgv1.Database{}
+		if err := c.Get(ctx, types.NamespacedName{Name: name, Namespace: postgresDB.Namespace}, db); err != nil {
+			if errors.IsNotFound(err) {
+				continue
+			}
+			return fmt.Errorf("getting CNPG Database %s for orphaning: %w", name, err)
+		}
+		if db.Annotations[annotationRetainedFrom] == postgresDB.Name {
+			continue
+		}
+		stripOwnerReference(db, postgresDB.UID)
+		if db.Annotations == nil {
+			db.Annotations = make(map[string]string)
+		}
+		db.Annotations[annotationRetainedFrom] = postgresDB.Name
+		if err := c.Update(ctx, db); err != nil {
+			return fmt.Errorf("orphaning CNPG Database %s: %w", name, err)
+		}
+		logger.Info("Orphaned CNPG Database CR", "name", name)
+	}
+	return nil
+}
+
+// orphanConfigMaps mirrors orphanCNPGDatabases for the per-database ConfigMaps.
+func orphanConfigMaps(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, databases []enterprisev4.DatabaseDefinition) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range databases {
+		name := configMapName(postgresDB.Name, dbSpec.Name)
+		cm := &corev1.ConfigMap{}
+		if err := c.Get(ctx, types.NamespacedName{Name: name, Namespace: postgresDB.Namespace}, cm); err != nil {
+			if errors.IsNotFound(err) {
+				continue
+			}
+			return fmt.Errorf("getting ConfigMap %s for orphaning: %w", name, err)
+		}
+		if cm.Annotations[annotationRetainedFrom] == postgresDB.Name {
+			continue
+		}
+		stripOwnerReference(cm, postgresDB.UID)
+		if cm.Annotations == nil {
+			cm.Annotations = make(map[string]string)
+		}
+		cm.Annotations[annotationRetainedFrom] = postgresDB.Name
+		if err := c.Update(ctx, cm); err != nil {
+			return fmt.Errorf("orphaning ConfigMap %s: %w", name, err)
+		}
+		logger.Info("Orphaned ConfigMap", "name", name)
+	}
+	return nil
+}
+
+// orphanSecrets mirrors orphanCNPGDatabases for the admin and rw role Secrets.
+func orphanSecrets(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, databases []enterprisev4.DatabaseDefinition) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range databases {
+		for _, role := range []string{secretRoleAdmin, secretRoleRW} {
+			name := roleSecretName(postgresDB.Name, dbSpec.Name, role)
+			secret := &corev1.Secret{}
+			if err := c.Get(ctx, types.NamespacedName{Name: name, Namespace: postgresDB.Namespace}, secret); err != nil {
+				if errors.IsNotFound(err) {
+					continue
+				}
+				return fmt.Errorf("getting Secret %s for orphaning: %w", name, err)
+			}
+			if secret.Annotations[annotationRetainedFrom] == postgresDB.Name {
+				continue
+			}
+			stripOwnerReference(secret, postgresDB.UID)
+			if secret.Annotations == nil {
+				secret.Annotations = make(map[string]string)
+			}
+			secret.Annotations[annotationRetainedFrom] = postgresDB.Name
+			if err := c.Update(ctx, secret); err != nil {
+				return fmt.Errorf("orphaning Secret %s: %w", name, err)
+			}
+			logger.Info("Orphaned Secret", "name", name)
+		}
+	}
+	return nil
+}
+
+// deleteCNPGDatabases deletes the CNPG Database CRs for non-retained databases.
+// NotFound is tolerated so the cleanup is idempotent across retries.
+func deleteCNPGDatabases(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, databases []enterprisev4.DatabaseDefinition) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range databases {
+		name := cnpgDatabaseName(postgresDB.Name, dbSpec.Name)
+		db := &cnpgv1.Database{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: postgresDB.Namespace}}
+		if err := c.Delete(ctx, db); err != nil {
+			if errors.IsNotFound(err) {
+				continue
+			}
+			return fmt.Errorf("deleting CNPG Database %s: %w", name, err)
+		}
+		logger.Info("Deleted CNPG Database CR", "name", name)
+	}
+	return nil
+}
+
+// deleteConfigMaps deletes the per-database ConfigMaps; NotFound is tolerated.
+func deleteConfigMaps(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, databases []enterprisev4.DatabaseDefinition) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range databases {
+		name := configMapName(postgresDB.Name, dbSpec.Name)
+		cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: postgresDB.Namespace}}
+		if err := c.Delete(ctx, cm); err != nil {
+			if errors.IsNotFound(err) {
+				continue
+			}
+			return fmt.Errorf("deleting ConfigMap %s: %w", name, err)
+		}
+		logger.Info("Deleted ConfigMap", "name", name)
+	}
+	return nil
+}
+
+// deleteSecrets deletes the admin and rw role Secrets; NotFound is tolerated.
+func deleteSecrets(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, databases []enterprisev4.DatabaseDefinition) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range databases {
+		for _, role := range []string{secretRoleAdmin, secretRoleRW} {
+			name := roleSecretName(postgresDB.Name, dbSpec.Name, role)
+			secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: postgresDB.Namespace}}
+			if err := c.Delete(ctx, secret); err != nil {
+				if errors.IsNotFound(err) {
+					continue
+				}
+				return fmt.Errorf("deleting Secret %s: %w", name, err)
+			}
+			logger.Info("Deleted Secret", "name", name)
+		}
+	}
+	return nil
+}
+
+// buildRetainedRoles rebuilds the ManagedRole entries for retained databases
+// only — applied on deletion so ownership of deleted databases' roles is
+// released. Must stay structurally identical to patchManagedRoles' entries,
+// otherwise the SSA diff would churn.
+func buildRetainedRoles(postgresDBName string, retained []enterprisev4.DatabaseDefinition) []enterprisev4.ManagedRole {
+	roles := make([]enterprisev4.ManagedRole, 0, len(retained)*2)
+	for _, dbSpec := range retained {
+		roles = append(roles,
+			enterprisev4.ManagedRole{
+				Name:   adminRoleName(dbSpec.Name),
+				Exists: true,
+				PasswordSecretRef: &corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: roleSecretName(postgresDBName, dbSpec.Name, secretRoleAdmin)},
+					Key: secretKeyPassword},
+			},
+			enterprisev4.ManagedRole{
+				Name:   rwRoleName(dbSpec.Name),
+				Exists: true,
+				PasswordSecretRef: &corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: roleSecretName(postgresDBName, dbSpec.Name, secretRoleRW)},
+					Key: secretKeyPassword},
+			},
+		)
+	}
+	return roles
+}
+
+// patchManagedRolesOnDeletion server-side-applies only the retained roles under
+// this CR's field manager, releasing ownership of everything else.
+// NOTE(review): same TypeMeta/unstructured-typed-slice caveats as
+// patchManagedRoles apply here.
+func patchManagedRolesOnDeletion(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, cluster *enterprisev4.PostgresCluster, retained []enterprisev4.DatabaseDefinition) error {
+	roles := buildRetainedRoles(postgresDB.Name, retained)
+	rolePatch := &unstructured.Unstructured{
+		Object: map[string]any{
+			"apiVersion": cluster.APIVersion,
+			"kind":       cluster.Kind,
+			"metadata":   map[string]any{"name": cluster.Name, "namespace": cluster.Namespace},
+			"spec":       map[string]any{"managedRoles": roles},
+		},
+	}
+	if err := c.Patch(ctx, rolePatch, client.Apply, client.FieldOwner(fieldManagerName(postgresDB.Name))); err != nil {
+		return fmt.Errorf("patching managed roles on deletion: %w", err)
+	}
+	log.FromContext(ctx).Info("Patched managed roles on deletion", "postgresDatabase", postgresDB.Name, "retainedRoles", len(roles))
+	return nil
+}
+
+// stripOwnerReference removes any owner reference whose UID matches ownerUID,
+// leaving other owners intact.
+func stripOwnerReference(obj metav1.Object, ownerUID types.UID) {
+	refs := obj.GetOwnerReferences()
+	filtered := make([]metav1.OwnerReference, 0, len(refs))
+	for _, ref := range refs {
+		if ref.UID != ownerUID {
+			filtered = append(filtered, ref)
+		}
+	}
+	obj.SetOwnerReferences(filtered)
+}
+
+// adoptResource reverses orphaning: clears the retained-from annotation and
+// re-establishes this CR as controller owner, then persists the object.
+func adoptResource(ctx context.Context, c client.Client, scheme *runtime.Scheme, postgresDB *enterprisev4.PostgresDatabase, obj client.Object) error {
+	annotations := obj.GetAnnotations()
+	delete(annotations, annotationRetainedFrom)
+	obj.SetAnnotations(annotations)
+	if err := controllerutil.SetControllerReference(postgresDB, obj, scheme); err != nil {
+		return err
+	}
+	return c.Update(ctx, obj)
+}
+
+// reconcileUserSecrets ensures an admin and an rw password Secret exists for
+// every database in spec.
+func reconcileUserSecrets(ctx context.Context, c client.Client, scheme *runtime.Scheme, postgresDB *enterprisev4.PostgresDatabase) error {
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		if err := ensureSecret(ctx, c, scheme, postgresDB, adminRoleName(dbSpec.Name), roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleAdmin)); err != nil {
+			return err
+		}
+		if err := ensureSecret(ctx, c, scheme, postgresDB, rwRoleName(dbSpec.Name), roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleRW)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ensureSecret creates the secret if missing, re-adopts it if it was orphaned
+// by a same-named predecessor, and otherwise leaves it untouched (existing
+// passwords are never rotated here).
+func ensureSecret(ctx context.Context, c client.Client, scheme *runtime.Scheme, postgresDB *enterprisev4.PostgresDatabase, roleName, secretName string) error {
+	secret, err := getSecret(ctx, c, postgresDB.Namespace, secretName)
+	if err != nil {
+		return err
+	}
+	logger := log.FromContext(ctx)
+	switch {
+	case secret == nil:
+		logger.Info("Creating missing user secret", "name", secretName)
+		return createUserSecret(ctx, c, scheme, postgresDB, roleName, secretName)
+	case secret.Annotations[annotationRetainedFrom] == postgresDB.Name:
+		logger.Info("Re-adopting orphaned secret", "name", secretName)
+		return adoptResource(ctx, c, scheme, postgresDB, secret)
+	}
+	return nil
+}
+
+// getSecret fetches a Secret, mapping NotFound to (nil, nil) so callers can
+// distinguish "absent" from a transient API error.
+func getSecret(ctx context.Context, c client.Client, namespace, name string) (*corev1.Secret, error) {
+	secret := &corev1.Secret{}
+	err := c.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, secret)
+	if errors.IsNotFound(err) {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	return secret, nil
+}
+
+// createUserSecret generates a fresh password and creates the owned Secret.
+// AlreadyExists is treated as success to tolerate concurrent reconciles.
+// NOTE(review): in the AlreadyExists race the winner's secret keeps ITS
+// password and owner ref — presumably fine since both writers are this
+// controller; confirm no other component creates these names.
+func createUserSecret(ctx context.Context, c client.Client, scheme *runtime.Scheme, postgresDB *enterprisev4.PostgresDatabase, roleName, secretName string) error {
+	pw, err := generatePassword()
+	if err != nil {
+		return err
+	}
+	secret := buildPasswordSecret(postgresDB, secretName, roleName, pw)
+	if err := controllerutil.SetControllerReference(postgresDB, secret, scheme); err != nil {
+		return fmt.Errorf("setting owner reference on Secret %s: %w", secretName, err)
+	}
+	if err := c.Create(ctx, secret); err != nil {
+		if errors.IsAlreadyExists(err) {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// buildPasswordSecret assembles the username/password Secret. The CNPG reload
+// label lets CNPG pick up password changes without a cluster restart.
+func buildPasswordSecret(postgresDB *enterprisev4.PostgresDatabase, secretName, roleName, pw string) *corev1.Secret {
+	return &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      secretName,
+			Namespace: postgresDB.Namespace,
+			Labels:    map[string]string{labelManagedBy: "splunk-operator", labelCNPGReload: "true"},
+		},
+		Data: map[string][]byte{"username": []byte(roleName), secretKeyPassword: []byte(pw)},
+	}
+}
+
+// reconcileRoleConfigMaps creates/updates one connection-info ConfigMap per
+// database, re-adopting orphans left behind by a same-named predecessor.
+func reconcileRoleConfigMaps(ctx context.Context, c client.Client, scheme *runtime.Scheme, postgresDB *enterprisev4.PostgresDatabase, endpoints clusterEndpoints) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		cmName := configMapName(postgresDB.Name, dbSpec.Name)
+		cm := &corev1.ConfigMap{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      cmName,
+				Namespace: postgresDB.Namespace,
+				Labels:    map[string]string{labelManagedBy: "splunk-operator"},
+			},
+		}
+		_, err := controllerutil.CreateOrUpdate(ctx, c, cm, func() error {
+			cm.Data = buildDatabaseConfigMapBody(dbSpec.Name, endpoints)
+			reAdopting := cm.Annotations[annotationRetainedFrom] == postgresDB.Name
+			if reAdopting {
+				logger.Info("Re-adopting orphaned ConfigMap", "name", cmName)
+				delete(cm.Annotations, annotationRetainedFrom)
+			}
+			if cm.CreationTimestamp.IsZero() || reAdopting {
+				return controllerutil.SetControllerReference(postgresDB, cm, scheme)
+			}
+			return nil
+		})
+		if err != nil {
+			return fmt.Errorf("reconciling ConfigMap %s: %w", cmName, err)
+		}
+	}
+	return nil
+}
+
+// buildDatabaseConfigMapBody renders the connection keys consumers read;
+// pooler hosts are present only when the cluster has a pooler enabled.
+func buildDatabaseConfigMapBody(dbName string, endpoints clusterEndpoints) map[string]string {
+	data := map[string]string{
+		"dbname":     dbName,
+		"port":       postgresPort,
+		"rw-host":    endpoints.RWHost,
+		"ro-host":    endpoints.ROHost,
+		"admin-user": adminRoleName(dbName),
+		"rw-user":    rwRoleName(dbName),
+	}
+	if endpoints.PoolerRWHost != "" {
+		data["pooler-rw-host"] = endpoints.PoolerRWHost
+	}
+	if endpoints.PoolerROHost != "" {
+		data["pooler-ro-host"] = endpoints.PoolerROHost
+	}
+	return data
+}
+
+func resolveClusterEndpoints(cluster *enterprisev4.PostgresCluster, cnpgCluster *cnpgv1.Cluster, namespace string) clusterEndpoints {
+	// FQDN so consumers in other namespaces can resolve without extra config.
+ endpoints := clusterEndpoints{ + RWHost: fmt.Sprintf("%s.%s.svc.cluster.local", cnpgCluster.Status.WriteService, namespace), + ROHost: fmt.Sprintf("%s.%s.svc.cluster.local", cnpgCluster.Status.ReadService, namespace), + } + if cluster.Status.ConnectionPoolerStatus != nil && cluster.Status.ConnectionPoolerStatus.Enabled { + endpoints.PoolerRWHost = fmt.Sprintf("%s-pooler-%s.%s.svc.cluster.local", cnpgCluster.Name, readWriteEndpoint, namespace) + endpoints.PoolerROHost = fmt.Sprintf("%s-pooler-%s.%s.svc.cluster.local", cnpgCluster.Name, readOnlyEndpoint, namespace) + } + return endpoints +} + +func populateDatabaseStatus(postgresDB *enterprisev4.PostgresDatabase) []enterprisev4.DatabaseInfo { + databases := make([]enterprisev4.DatabaseInfo, 0, len(postgresDB.Spec.Databases)) + for _, dbSpec := range postgresDB.Spec.Databases { + databases = append(databases, enterprisev4.DatabaseInfo{ + Name: dbSpec.Name, + Ready: true, + AdminUserSecretRef: &corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleAdmin)}, Key: secretKeyPassword}, + RWUserSecretRef: &corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleRW)}, Key: secretKeyPassword}, + ConfigMapRef: &corev1.LocalObjectReference{Name: configMapName(postgresDB.Name, dbSpec.Name)}, + }) + } + return databases +} + +func hasNewDatabases(postgresDB *enterprisev4.PostgresDatabase) bool { + existing := make(map[string]bool, len(postgresDB.Status.Databases)) + for _, dbInfo := range postgresDB.Status.Databases { + existing[dbInfo.Name] = true + } + for _, dbSpec := range postgresDB.Spec.Databases { + if !existing[dbSpec.Name] { + return true + } + } + return false +} + +// Naming helpers — single source of truth shared by creation and status wiring. 
+func fieldManagerName(postgresDBName string) string { return fieldManagerPrefix + postgresDBName } +func adminRoleName(dbName string) string { return dbName + "_admin" } +func rwRoleName(dbName string) string { return dbName + "_rw" } +func cnpgDatabaseName(postgresDBName, dbName string) string { + return fmt.Sprintf("%s-%s", postgresDBName, dbName) +} +func roleSecretName(postgresDBName, dbName, role string) string { + return fmt.Sprintf("%s-%s-%s", postgresDBName, dbName, role) +} +func configMapName(postgresDBName, dbName string) string { + return fmt.Sprintf("%s-%s-config", postgresDBName, dbName) +} + +// generatePassword uses crypto/rand (via sethvargo/go-password) — predictable passwords +// are unacceptable for credentials that protect live database access. +func generatePassword() (string, error) { + return password.Generate(passwordLength, passwordDigits, passwordSymbols, false, true) +} diff --git a/pkg/postgresql/database/core/ports.go b/pkg/postgresql/database/core/ports.go new file mode 100644 index 000000000..0ee71bfe4 --- /dev/null +++ b/pkg/postgresql/database/core/ports.go @@ -0,0 +1,10 @@ +package core + +import "context" + +// DBRepo is the port for all direct database operations that require a +// superuser connection, bypassing any connection pooler. +// Adapters implementing this port live in adapter/. 
type DBRepo interface {
	ExecGrants(ctx context.Context, dbName string) error
}
diff --git a/pkg/postgresql/database/core/types.go b/pkg/postgresql/database/core/types.go
new file mode 100644
index 000000000..0d1fa116a
--- /dev/null
+++ b/pkg/postgresql/database/core/types.go
@@ -0,0 +1,94 @@
package core

import (
	"time"

	enterprisev4 "github.com/splunk/splunk-operator/api/v4"
)

// String-backed enum types keep phases, condition types/reasons, and
// cluster-readiness sentinels from being interchanged at call sites.
type reconcileDBPhases string
type conditionTypes string
type conditionReasons string
type clusterReadyStatus string

const (
	// Requeue intervals: a missing cluster is polled less aggressively than a
	// transient reconcile failure.
	retryDelay                = time.Second * 15
	clusterNotFoundRetryDelay = time.Second * 30

	postgresPort string = "5432"

	// Service-name suffixes used to build pooler endpoint hostnames.
	readOnlyEndpoint  string = "ro"
	readWriteEndpoint string = "rw"

	deletionPolicyRetain string = "Retain"

	postgresDatabaseFinalizerName string = "postgresdatabases.enterprise.splunk.com/finalizer"
	// Marks a retained (orphaned) resource with the name of its former owner,
	// enabling re-adoption on a later reconcile.
	annotationRetainedFrom string = "enterprise.splunk.com/retained-from"

	fieldManagerPrefix string = "postgresdatabase-"

	// Role identifiers used in Secret names and keys of the credential Secrets.
	secretRoleAdmin   string = "admin"
	secretRoleRW      string = "rw"
	secretKeyPassword string = "password"

	labelManagedBy  string = "app.kubernetes.io/managed-by"
	labelCNPGReload string = "cnpg.io/reload"

	// Password generation — no symbols for PostgreSQL connection string compatibility.
	passwordLength  = 32
	passwordDigits  = 8
	passwordSymbols = 0

	// DB reconcile phases surfaced in PostgresDatabase status.
	readyDBPhase        reconcileDBPhases = "Ready"
	pendingDBPhase      reconcileDBPhases = "Pending"
	provisioningDBPhase reconcileDBPhases = "Provisioning"
	failedDBPhase       reconcileDBPhases = "Failed"

	// condition types
	// NOTE(review): clusterReady (a conditionTypes value) differs only in case
	// from the exported ClusterReady sentinel below — easy to misread; they are
	// distinct identifiers with distinct types.
	clusterReady    conditionTypes = "ClusterReady"
	rolesReady      conditionTypes = "RolesReady"
	databasesReady  conditionTypes = "DatabasesReady"
	secretsReady    conditionTypes = "SecretsReady"
	configMapsReady conditionTypes = "ConfigMapsReady"
	privilegesReady conditionTypes = "PrivilegesReady"

	// condition reasons
	reasonClusterNotFound     conditionReasons = "ClusterNotFound"
	reasonClusterProvisioning conditionReasons = "ClusterProvisioning"
	// NOTE(review): identifier says "FetchFailed" but the wire value is
	// "ClusterInfoFetchNotPossible" — confirm the mismatch is intentional;
	// the string is part of the status API and must not change silently.
	reasonClusterInfoFetchFailed   conditionReasons = "ClusterInfoFetchNotPossible"
	reasonClusterAvailable         conditionReasons = "ClusterAvailable"
	reasonDatabasesAvailable       conditionReasons = "DatabasesAvailable"
	reasonSecretsCreated           conditionReasons = "SecretsCreated"
	reasonSecretsCreationFailed    conditionReasons = "SecretsCreationFailed"
	reasonWaitingForCNPG           conditionReasons = "WaitingForCNPG"
	reasonUsersCreationFailed      conditionReasons = "UsersCreationFailed"
	reasonUsersAvailable           conditionReasons = "UsersAvailable"
	reasonRoleConflict             conditionReasons = "RoleConflict"
	reasonConfigMapsCreationFailed conditionReasons = "ConfigMapsCreationFailed"
	reasonConfigMapsCreated        conditionReasons = "ConfigMapsCreated"
	reasonPrivilegesGranted        conditionReasons = "PrivilegesGranted"
	reasonPrivilegesGrantFailed    conditionReasons = "PrivilegesGrantFailed"

	// ClusterReady sentinel values returned by ensureClusterReady.
	// Exported so the controller adapter can switch on them if needed.
	// NOTE(review): the consts are exported but their type clusterReadyStatus
	// is not — outside this package they can be compared but the type cannot
	// be named; confirm this is the intended API surface.
	ClusterNotFound         clusterReadyStatus = "NotFound"
	ClusterNotReady         clusterReadyStatus = "NotReady"
	ClusterNoProvisionerRef clusterReadyStatus = "NoProvisionerRef"
	ClusterReady            clusterReadyStatus = "Ready"
)

// clusterEndpoints holds fully-resolved connection hostnames for a cluster.
// PoolerRWHost and PoolerROHost are empty when connection pooling is disabled.
type clusterEndpoints struct {
	RWHost       string
	ROHost       string
	PoolerRWHost string
	PoolerROHost string
}

// deletionPlan separates databases by their DeletionPolicy for the cleanup workflow.
type deletionPlan struct {
	retained []enterprisev4.DatabaseDefinition
	deleted  []enterprisev4.DatabaseDefinition
}