diff --git a/.gitignore b/.gitignore index 5de8f6d85..050b59d70 100644 --- a/.gitignore +++ b/.gitignore @@ -100,4 +100,5 @@ bundle_*/ test/secret/*.log kubeconfig .devcontainer/devcontainer.json -kuttl-artifacts/* \ No newline at end of file +kuttl-artifacts/* +.tool-versions \ No newline at end of file diff --git a/PROJECT b/PROJECT index e87979069..9acd416fe 100644 --- a/PROJECT +++ b/PROJECT @@ -140,4 +140,31 @@ resources: kind: ObjectStorage path: github.com/splunk/splunk-operator/api/v4 version: v4 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: splunk.com + group: enterprise + kind: PostgresCluster + path: github.com/splunk/splunk-operator/api/v4 + version: v4 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: splunk.com + group: enterprise + kind: PostgresClusterClass + path: github.com/splunk/splunk-operator/api/v4 + version: v4 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: splunk.com + group: enterprise + kind: PostgresDatabase + path: github.com/splunk/splunk-operator/api/v4 + version: v4 version: "3" diff --git a/api/v4/postgrescluster_types.go b/api/v4/postgrescluster_types.go new file mode 100644 index 000000000..6ddb14c9d --- /dev/null +++ b/api/v4/postgrescluster_types.go @@ -0,0 +1,207 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ManagedRole represents a PostgreSQL role to be created and managed in the cluster. +type ManagedRole struct { + // Name of the role/user to create. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + + // PasswordSecretRef references a Secret and the key within it containing the password for this role. + // +optional + PasswordSecretRef *corev1.SecretKeySelector `json:"passwordSecretRef,omitempty"` + + // Exists controls whether the role should be present (true) or absent (false) in PostgreSQL. + // +kubebuilder:default=true + // +optional + Exists bool `json:"exists,omitempty"` +} + +// PostgresClusterSpec defines the desired state of PostgresCluster. +// Validation rules ensure immutability of Class, and that Storage and PostgresVersion can only be set once and cannot be removed or downgraded. +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.postgresVersion) || (has(self.postgresVersion) && int(self.postgresVersion.split('.')[0]) >= int(oldSelf.postgresVersion.split('.')[0]))",messageExpression="!has(self.postgresVersion) ? 'postgresVersion cannot be removed once set (was: ' + oldSelf.postgresVersion + ')' : 'postgresVersion major version cannot be downgraded (from: ' + oldSelf.postgresVersion + ', to: ' + self.postgresVersion + ')'" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.storage) || (has(self.storage) && quantity(self.storage).compareTo(quantity(oldSelf.storage)) >= 0)",messageExpression="!has(self.storage) ? 
'storage cannot be removed once set (was: ' + string(oldSelf.storage) + ')' : 'storage size cannot be decreased (from: ' + string(oldSelf.storage) + ', to: ' + string(self.storage) + ')'" +// +kubebuilder:validation:XValidation:rule="!has(self.connectionPoolerConfig)",message="connectionPoolerConfig cannot be overridden on PostgresCluster" +type PostgresClusterSpec struct { + // This field is IMMUTABLE after creation. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="class is immutable" + Class string `json:"class"` + + // Storage overrides the storage size from ClusterClass. + // Example: "5Gi" + // +optional + Storage *resource.Quantity `json:"storage,omitempty"` + + // Instances overrides the number of PostgreSQL instances from ClusterClass. + // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=10 + Instances *int32 `json:"instances,omitempty"` + + // PostgresVersion is the PostgreSQL version (major or major.minor). + // Examples: "18" (latest 18.x), "18.1" (specific minor), "17", "16" + // +kubebuilder:validation:Pattern=`^[0-9]+(\.[0-9]+)?$` + // +optional + PostgresVersion *string `json:"postgresVersion,omitempty"` + + // Resources overrides CPU/memory resources from ClusterClass. + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + + // PostgreSQL overrides PostgreSQL engine parameters from ClusterClass. + // Maps to postgresql.conf settings. + // Default empty map prevents panic. + // Example: {"shared_buffers": "128MB", "log_min_duration_statement": "500ms"} + // +optional + // +kubebuilder:default={} + PostgreSQLConfig map[string]string `json:"postgresqlConfig,omitempty"` + + // PgHBA contains pg_hba.conf host-based authentication rules. + // Defines client authentication and connection security (cluster-wide). + // Maps to pg_hba.conf settings. + // Default empty array prevents panic. 
+ // Example: ["hostssl all all 0.0.0.0/0 scram-sha-256"] + // +optional + // +kubebuilder:default={} + PgHBA []string `json:"pgHBA,omitempty"` + + // ConnectionPoolerEnabled controls whether PgBouncer connection pooling is deployed for this cluster. + // When set, takes precedence over the class-level connectionPoolerEnabled value. + // +kubebuilder:default=false + // +optional + ConnectionPoolerEnabled *bool `json:"connectionPoolerEnabled,omitempty"` + + // Only takes effect when connection pooling is enabled. + // +optional + ConnectionPoolerConfig *ConnectionPoolerConfig `json:"connectionPoolerConfig,omitempty"` + + // ManagedRoles contains PostgreSQL roles that should be created in the cluster. + // This field supports Server-Side Apply with per-role granularity, allowing + // multiple PostgresDatabase controllers to manage different roles independently. + // +optional + // +listType=map + // +listMapKey=name + ManagedRoles []ManagedRole `json:"managedRoles,omitempty"` + + // ClusterDeletionPolicy controls the deletion behavior of the underlying CNPG Cluster when the PostgresCluster is deleted. + // +kubebuilder:validation:Enum=Delete;Retain + // +kubebuilder:default=Retain + // +optional + ClusterDeletionPolicy *string `json:"clusterDeletionPolicy,omitempty"` +} + +// PostgresClusterResources defines references to Kubernetes resources related to the PostgresCluster, such as ConfigMaps and Secrets. +type PostgresClusterResources struct { + // ConfigMapRef references the ConfigMap with connection endpoints. + // Contains: CLUSTER_ENDPOINTS, POOLER_ENDPOINTS (if connection pooler enabled) + // +optional + ConfigMapRef *corev1.LocalObjectReference `json:"configMapRef,omitempty"` + + // +optional + SuperUserSecretRef *corev1.SecretKeySelector `json:"secretRef,omitempty"` +} + +// PostgresClusterStatus defines the observed state of PostgresCluster. +type PostgresClusterStatus struct { + // Phase represents the current phase of the PostgresCluster. 
+ // Values: "Pending", "Provisioning", "Failed", "Ready", "Deleting" + // +optional + Phase *string `json:"phase,omitempty"` + + // Conditions represent the latest available observations of the PostgresCluster's state. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // ProvisionerRef contains reference to the provisioner resource managing this PostgresCluster. + // Right now, only CNPG is supported. + // +optional + ProvisionerRef *corev1.ObjectReference `json:"provisionerRef,omitempty"` + + // ConnectionPoolerStatus contains the observed state of the connection pooler. + // Only populated when connection pooler is enabled in the PostgresClusterClass. + // +optional + ConnectionPoolerStatus *ConnectionPoolerStatus `json:"connectionPoolerStatus,omitempty"` + + // ManagedRolesStatus tracks the reconciliation status of managed roles. + // +optional + ManagedRolesStatus *ManagedRolesStatus `json:"managedRolesStatus,omitempty"` + + // Resources contains references to related Kubernetes resources like ConfigMaps and Secrets. + // +optional + Resources *PostgresClusterResources `json:"resources,omitempty"` +} + +// ManagedRolesStatus tracks the state of managed PostgreSQL roles. +type ManagedRolesStatus struct { + // Reconciled contains roles that have been successfully created and are ready. + // +optional + Reconciled []string `json:"reconciled,omitempty"` + + // Pending contains roles that are being created but not yet ready. + // +optional + Pending []string `json:"pending,omitempty"` + + // Failed contains roles that failed to reconcile with error messages. + // +optional + Failed map[string]string `json:"failed,omitempty"` +} + +// ConnectionPoolerStatus contains the observed state of the connection pooler. +type ConnectionPoolerStatus struct { + // Enabled indicates whether pooler is active for this cluster. 
+ Enabled bool `json:"enabled"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Namespaced +// +kubebuilder:printcolumn:name="Class",type=string,JSONPath=`.spec.class` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// PostgresCluster is the Schema for the postgresclusters API. +type PostgresCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PostgresClusterSpec `json:"spec,omitempty"` + Status PostgresClusterStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PostgresClusterList contains a list of PostgresCluster. +type PostgresClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PostgresCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PostgresCluster{}, &PostgresClusterList{}) +} diff --git a/api/v4/postgresclusterclass_types.go b/api/v4/postgresclusterclass_types.go new file mode 100644 index 000000000..9945ec669 --- /dev/null +++ b/api/v4/postgresclusterclass_types.go @@ -0,0 +1,204 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +kubebuilder:validation:XValidation:rule="!has(self.cnpg) || self.provisioner == 'postgresql.cnpg.io'",message="cnpg config can only be set when provisioner is postgresql.cnpg.io" +// +kubebuilder:validation:XValidation:rule="!has(self.config) || !has(self.config.connectionPoolerEnabled) || !self.config.connectionPoolerEnabled || (has(self.cnpg) && has(self.cnpg.connectionPooler))",message="cnpg.connectionPooler must be set when config.connectionPoolerEnabled is true" +// PostgresClusterClassSpec defines the desired state of PostgresClusterClass. +// PostgresClusterClass is immutable after creation - it serves as a template for Cluster CRs. +type PostgresClusterClassSpec struct { + // Provisioner identifies which database provisioner to use. + // Currently supported: "postgresql.cnpg.io" (CloudNativePG) + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=postgresql.cnpg.io + Provisioner string `json:"provisioner"` + + // PostgresClusterConfig contains cluster-level configuration. + // These settings apply to PostgresCluster infrastructure. + // Can be overridden in PostgresCluster CR. + // +kubebuilder:default={} + // +optional + Config *PostgresClusterClassConfig `json:"config,omitempty"` + + // CNPG contains CloudNativePG-specific configuration and policies. + // Only used when Provisioner is "postgresql.cnpg.io" + // These settings CANNOT be overridden in PostgresCluster CR (platform policy). + // +optional + CNPG *CNPGConfig `json:"cnpg,omitempty"` +} + +// PostgresClusterClassConfig contains provider-agnostic cluster configuration. +// These fields define PostgresCluster infrastructure and can be overridden in PostgresCluster CR. +type PostgresClusterClassConfig struct { + // Instances is the number of database instances (1 primary + N replicas). 
+ // Single instance (1) is suitable for development. + // High availability requires at least 3 instances (1 primary + 2 replicas). + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=10 + // +kubebuilder:default=1 + // +optional + Instances *int32 `json:"instances,omitempty"` + + // Storage is the size of persistent volume for each instance. + // Cannot be decreased after cluster creation (PostgreSQL limitation). + // Recommended minimum: 10Gi for production viability. + // Example: "50Gi", "100Gi", "1Ti" + // +kubebuilder:default="50Gi" + // +optional + Storage *resource.Quantity `json:"storage,omitempty"` + + // PostgresVersion is the PostgreSQL version (major or major.minor). + // Examples: "18" (latest 18.x), "18.1" (specific minor), "17", "16" + // +kubebuilder:validation:Pattern=`^[0-9]+(\.[0-9]+)?$` + // +kubebuilder:default="18" + // +optional + PostgresVersion *string `json:"postgresVersion,omitempty"` + + // Resources defines CPU and memory requests/limits per instance. + // All instances in the cluster have the same resources. + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + + // PostgreSQLConfig contains PostgreSQL engine configuration parameters. + // Maps to postgresql.conf settings (cluster-wide). + // Example: {"max_connections": "200", "shared_buffers": "2GB"} + // +optional + PostgreSQLConfig map[string]string `json:"postgresqlConfig,omitempty"` + + // PgHBA contains pg_hba.conf host-based authentication rules. + // Defines client authentication and connection security (cluster-wide). + // Example: ["hostssl all all 0.0.0.0/0 scram-sha-256"] + // +optional + PgHBA []string `json:"pgHBA,omitempty"` + + // ConnectionPoolerEnabled controls whether PgBouncer connection pooling is deployed. + // When true, creates RW and RO pooler deployments for clusters using this class. + // Can be overridden in PostgresCluster CR. 
+ // +kubebuilder:default=false + // +optional + ConnectionPoolerEnabled *bool `json:"connectionPoolerEnabled,omitempty"` +} + +// ConnectionPoolerMode defines the PgBouncer connection pooling strategy. +// +kubebuilder:validation:Enum=session;transaction;statement +type ConnectionPoolerMode string + +const ( + // ConnectionPoolerModeSession assigns a connection for the entire client session (most compatible). + ConnectionPoolerModeSession ConnectionPoolerMode = "session" + + // ConnectionPoolerModeTransaction returns the connection after each transaction (recommended). + ConnectionPoolerModeTransaction ConnectionPoolerMode = "transaction" + + // ConnectionPoolerModeStatement returns the connection after each statement (limited compatibility). + ConnectionPoolerModeStatement ConnectionPoolerMode = "statement" +) + +// ConnectionPoolerConfig defines PgBouncer connection pooler configuration. +// When enabled, creates RW and RO pooler deployments for clusters using this class. +type ConnectionPoolerConfig struct { + // Instances is the number of PgBouncer pod replicas. + // Higher values provide better availability and load distribution. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=10 + // +kubebuilder:default=3 + // +optional + Instances *int32 `json:"instances,omitempty"` + + // Mode defines the connection pooling strategy. + // +kubebuilder:default="transaction" + // +optional + Mode *ConnectionPoolerMode `json:"mode,omitempty"` + + // Config contains PgBouncer configuration parameters. + // Passed directly to CNPG Pooler spec.pgbouncer.parameters. + // See: https://cloudnative-pg.io/docs/1.28/connection_pooling/#pgbouncer-configuration-options + // +optional + Config map[string]string `json:"config,omitempty"` +} + +// CNPGConfig contains CloudNativePG-specific configuration. +// These fields control CNPG operator behavior and enforce platform policies. +// Cannot be overridden in Cluster CR. 
+type CNPGConfig struct { + // PrimaryUpdateMethod determines how the primary instance is updated. + // "restart" - tolerate brief downtime (suitable for development) + // "switchover" - minimal downtime via automated failover (production-grade) + // + // NOTE: When using "switchover", ensure clusterConfig.instances > 1. + // Switchover requires at least one replica to fail over to. + // +kubebuilder:validation:Enum=restart;switchover + // +kubebuilder:default=switchover + // +optional + PrimaryUpdateMethod *string `json:"primaryUpdateMethod,omitempty"` + + // ConnectionPooler contains PgBouncer connection pooler configuration. + // When enabled, creates RW and RO pooler deployments for clusters using this class. + // +optional + ConnectionPooler *ConnectionPoolerConfig `json:"connectionPooler,omitempty"` +} + +// PostgresClusterClassStatus defines the observed state of PostgresClusterClass. +type PostgresClusterClassStatus struct { + // Conditions represent the latest available observations of the PostgresClusterClass state. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // Phase represents the current phase of the PostgresClusterClass. 
+ // Valid phases: "Ready", "Invalid" + // +optional + Phase *string `json:"phase,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:printcolumn:name="Provisioner",type=string,JSONPath=`.spec.provisioner` +// +kubebuilder:printcolumn:name="Instances",type=integer,JSONPath=`.spec.postgresClusterConfig.instances` +// +kubebuilder:printcolumn:name="Storage",type=string,JSONPath=`.spec.postgresClusterConfig.storage` +// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.postgresClusterConfig.postgresVersion` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// PostgresClusterClass is the Schema for the postgresclusterclasses API. +// PostgresClusterClass defines a reusable template and policy for postgres cluster provisioning. +type PostgresClusterClass struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PostgresClusterClassSpec `json:"spec,omitempty"` + Status PostgresClusterClassStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PostgresClusterClassList contains a list of PostgresClusterClass. +type PostgresClusterClassList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PostgresClusterClass `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PostgresClusterClass{}, &PostgresClusterClassList{}) +} diff --git a/api/v4/postgresdatabase_types.go b/api/v4/postgresdatabase_types.go new file mode 100644 index 000000000..edab619b0 --- /dev/null +++ b/api/v4/postgresdatabase_types.go @@ -0,0 +1,96 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PostgresDatabaseSpec defines the desired state of PostgresDatabase. +// +kubebuilder:validation:XValidation:rule="self.clusterRef == oldSelf.clusterRef",message="clusterRef is immutable" +type PostgresDatabaseSpec struct { + // Reference to Postgres Cluster managed by postgresCluster controller + // +kubebuilder:validation:Required + ClusterRef corev1.LocalObjectReference `json:"clusterRef"` + + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=10 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.filter(y, y.name == x.name).size() == 1)",message="database names must be unique" + Databases []DatabaseDefinition `json:"databases"` +} + +type DatabaseDefinition struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=30 + Name string `json:"name"` + Extensions []string `json:"extensions,omitempty"` + // +kubebuilder:validation:Enum=Delete;Retain + // +kubebuilder:default=Delete + DeletionPolicy string `json:"deletionPolicy,omitempty"` +} + +type DatabaseInfo struct { + Name string `json:"name"` + Ready bool `json:"ready"` + DatabaseRef *corev1.LocalObjectReference `json:"databaseRef,omitempty"` + AdminUserSecretRef *corev1.SecretKeySelector `json:"adminUserSecretRef,omitempty"` + RWUserSecretRef *corev1.SecretKeySelector `json:"rwUserSecretRef,omitempty"` + ConfigMapRef *corev1.LocalObjectReference `json:"configMap,omitempty"` +} + +// 
PostgresDatabaseStatus defines the observed state of PostgresDatabase. +type PostgresDatabaseStatus struct { + // +optional + Phase *string `json:"phase,omitempty"` + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` + // +optional + Databases []DatabaseInfo `json:"databases,omitempty"` + // ObservedGeneration represents the .metadata.generation that the status was set based upon. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Cluster",type=string,JSONPath=`.spec.clusterRef.name` +// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` +// +kubebuilder:printcolumn:name="Age",type=date,JSONPath=`.metadata.creationTimestamp` + +// PostgresDatabase is the Schema for the postgresdatabases API. +type PostgresDatabase struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PostgresDatabaseSpec `json:"spec,omitempty"` + Status PostgresDatabaseStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PostgresDatabaseList contains a list of PostgresDatabase. +type PostgresDatabaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PostgresDatabase `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PostgresDatabase{}, &PostgresDatabaseList{}) +} diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index 7ae136536..d9535fb93 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -22,6 +22,7 @@ package v4 import ( "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -180,6 +181,31 @@ func (in *BundlePushTracker) DeepCopy() *BundlePushTracker { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *CNPGConfig) DeepCopyInto(out *CNPGConfig) { + *out = *in + if in.PrimaryUpdateMethod != nil { + in, out := &in.PrimaryUpdateMethod, &out.PrimaryUpdateMethod + *out = new(string) + **out = **in + } + if in.ConnectionPooler != nil { + in, out := &in.ConnectionPooler, &out.ConnectionPooler + *out = new(ConnectionPoolerConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CNPGConfig. +func (in *CNPGConfig) DeepCopy() *CNPGConfig { + if in == nil { + return nil + } + out := new(CNPGConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CacheManagerSpec) DeepCopyInto(out *CacheManagerSpec) { *out = *in @@ -355,6 +381,108 @@ func (in *CommonSplunkSpec) DeepCopy() *CommonSplunkSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionPoolerConfig) DeepCopyInto(out *ConnectionPoolerConfig) { + *out = *in + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(int32) + **out = **in + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(ConnectionPoolerMode) + **out = **in + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolerConfig. +func (in *ConnectionPoolerConfig) DeepCopy() *ConnectionPoolerConfig { + if in == nil { + return nil + } + out := new(ConnectionPoolerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConnectionPoolerStatus) DeepCopyInto(out *ConnectionPoolerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionPoolerStatus. +func (in *ConnectionPoolerStatus) DeepCopy() *ConnectionPoolerStatus { + if in == nil { + return nil + } + out := new(ConnectionPoolerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseDefinition) DeepCopyInto(out *DatabaseDefinition) { + *out = *in + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseDefinition. +func (in *DatabaseDefinition) DeepCopy() *DatabaseDefinition { + if in == nil { + return nil + } + out := new(DatabaseDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatabaseInfo) DeepCopyInto(out *DatabaseInfo) { + *out = *in + if in.DatabaseRef != nil { + in, out := &in.DatabaseRef, &out.DatabaseRef + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.AdminUserSecretRef != nil { + in, out := &in.AdminUserSecretRef, &out.AdminUserSecretRef + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.RWUserSecretRef != nil { + in, out := &in.RWUserSecretRef, &out.RWUserSecretRef + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(v1.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseInfo. 
+func (in *DatabaseInfo) DeepCopy() *DatabaseInfo { + if in == nil { + return nil + } + out := new(DatabaseInfo) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EsDefaults) DeepCopyInto(out *EsDefaults) { *out = *in @@ -742,6 +870,58 @@ func (in *LicenseManagerStatus) DeepCopy() *LicenseManagerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedRole) DeepCopyInto(out *ManagedRole) { + *out = *in + if in.PasswordSecretRef != nil { + in, out := &in.PasswordSecretRef, &out.PasswordSecretRef + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRole. +func (in *ManagedRole) DeepCopy() *ManagedRole { + if in == nil { + return nil + } + out := new(ManagedRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedRolesStatus) DeepCopyInto(out *ManagedRolesStatus) { + *out = *in + if in.Reconciled != nil { + in, out := &in.Reconciled, &out.Reconciled + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Pending != nil { + in, out := &in.Pending, &out.Pending + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Failed != nil { + in, out := &in.Failed, &out.Failed + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedRolesStatus. 
+func (in *ManagedRolesStatus) DeepCopy() *ManagedRolesStatus { + if in == nil { + return nil + } + out := new(ManagedRolesStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MonitoringConsole) DeepCopyInto(out *MonitoringConsole) { *out = *in @@ -946,6 +1126,490 @@ func (in *PhaseInfo) DeepCopy() *PhaseInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresCluster) DeepCopyInto(out *PostgresCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresCluster. +func (in *PostgresCluster) DeepCopy() *PostgresCluster { + if in == nil { + return nil + } + out := new(PostgresCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresClusterClass) DeepCopyInto(out *PostgresClusterClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterClass. 
+func (in *PostgresClusterClass) DeepCopy() *PostgresClusterClass { + if in == nil { + return nil + } + out := new(PostgresClusterClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresClusterClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresClusterClassConfig) DeepCopyInto(out *PostgresClusterClassConfig) { + *out = *in + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(int32) + **out = **in + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + x := (*in).DeepCopy() + *out = &x + } + if in.PostgresVersion != nil { + in, out := &in.PostgresVersion, &out.PostgresVersion + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.PostgreSQLConfig != nil { + in, out := &in.PostgreSQLConfig, &out.PostgreSQLConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PgHBA != nil { + in, out := &in.PgHBA, &out.PgHBA + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ConnectionPoolerEnabled != nil { + in, out := &in.ConnectionPoolerEnabled, &out.ConnectionPoolerEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterClassConfig. +func (in *PostgresClusterClassConfig) DeepCopy() *PostgresClusterClassConfig { + if in == nil { + return nil + } + out := new(PostgresClusterClassConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *PostgresClusterClassList) DeepCopyInto(out *PostgresClusterClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PostgresClusterClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterClassList. +func (in *PostgresClusterClassList) DeepCopy() *PostgresClusterClassList { + if in == nil { + return nil + } + out := new(PostgresClusterClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresClusterClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresClusterClassSpec) DeepCopyInto(out *PostgresClusterClassSpec) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(PostgresClusterClassConfig) + (*in).DeepCopyInto(*out) + } + if in.CNPG != nil { + in, out := &in.CNPG, &out.CNPG + *out = new(CNPGConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterClassSpec. +func (in *PostgresClusterClassSpec) DeepCopy() *PostgresClusterClassSpec { + if in == nil { + return nil + } + out := new(PostgresClusterClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresClusterClassStatus) DeepCopyInto(out *PostgresClusterClassStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Phase != nil { + in, out := &in.Phase, &out.Phase + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterClassStatus. +func (in *PostgresClusterClassStatus) DeepCopy() *PostgresClusterClassStatus { + if in == nil { + return nil + } + out := new(PostgresClusterClassStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresClusterList) DeepCopyInto(out *PostgresClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PostgresCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterList. +func (in *PostgresClusterList) DeepCopy() *PostgresClusterList { + if in == nil { + return nil + } + out := new(PostgresClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PostgresClusterResources) DeepCopyInto(out *PostgresClusterResources) { + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.SuperUserSecretRef != nil { + in, out := &in.SuperUserSecretRef, &out.SuperUserSecretRef + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterResources. +func (in *PostgresClusterResources) DeepCopy() *PostgresClusterResources { + if in == nil { + return nil + } + out := new(PostgresClusterResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { + *out = *in + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + x := (*in).DeepCopy() + *out = &x + } + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = new(int32) + **out = **in + } + if in.PostgresVersion != nil { + in, out := &in.PostgresVersion, &out.PostgresVersion + *out = new(string) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.PostgreSQLConfig != nil { + in, out := &in.PostgreSQLConfig, &out.PostgreSQLConfig + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PgHBA != nil { + in, out := &in.PgHBA, &out.PgHBA + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ConnectionPoolerEnabled != nil { + in, out := &in.ConnectionPoolerEnabled, &out.ConnectionPoolerEnabled + *out = new(bool) + **out = **in + } + if in.ConnectionPoolerConfig != nil { + in, out := &in.ConnectionPoolerConfig, &out.ConnectionPoolerConfig + *out = new(ConnectionPoolerConfig) + 
(*in).DeepCopyInto(*out) + } + if in.ManagedRoles != nil { + in, out := &in.ManagedRoles, &out.ManagedRoles + *out = make([]ManagedRole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ClusterDeletionPolicy != nil { + in, out := &in.ClusterDeletionPolicy, &out.ClusterDeletionPolicy + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterSpec. +func (in *PostgresClusterSpec) DeepCopy() *PostgresClusterSpec { + if in == nil { + return nil + } + out := new(PostgresClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresClusterStatus) DeepCopyInto(out *PostgresClusterStatus) { + *out = *in + if in.Phase != nil { + in, out := &in.Phase, &out.Phase + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ProvisionerRef != nil { + in, out := &in.ProvisionerRef, &out.ProvisionerRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.ConnectionPoolerStatus != nil { + in, out := &in.ConnectionPoolerStatus, &out.ConnectionPoolerStatus + *out = new(ConnectionPoolerStatus) + **out = **in + } + if in.ManagedRolesStatus != nil { + in, out := &in.ManagedRolesStatus, &out.ManagedRolesStatus + *out = new(ManagedRolesStatus) + (*in).DeepCopyInto(*out) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(PostgresClusterResources) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterStatus. 
+func (in *PostgresClusterStatus) DeepCopy() *PostgresClusterStatus { + if in == nil { + return nil + } + out := new(PostgresClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresDatabase) DeepCopyInto(out *PostgresDatabase) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresDatabase. +func (in *PostgresDatabase) DeepCopy() *PostgresDatabase { + if in == nil { + return nil + } + out := new(PostgresDatabase) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PostgresDatabase) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresDatabaseList) DeepCopyInto(out *PostgresDatabaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PostgresDatabase, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresDatabaseList. +func (in *PostgresDatabaseList) DeepCopy() *PostgresDatabaseList { + if in == nil { + return nil + } + out := new(PostgresDatabaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PostgresDatabaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresDatabaseSpec) DeepCopyInto(out *PostgresDatabaseSpec) { + *out = *in + out.ClusterRef = in.ClusterRef + if in.Databases != nil { + in, out := &in.Databases, &out.Databases + *out = make([]DatabaseDefinition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresDatabaseSpec. +func (in *PostgresDatabaseSpec) DeepCopy() *PostgresDatabaseSpec { + if in == nil { + return nil + } + out := new(PostgresDatabaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PostgresDatabaseStatus) DeepCopyInto(out *PostgresDatabaseStatus) { + *out = *in + if in.Phase != nil { + in, out := &in.Phase, &out.Phase + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Databases != nil { + in, out := &in.Databases, &out.Databases + *out = make([]DatabaseInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresDatabaseStatus. 
+func (in *PostgresDatabaseStatus) DeepCopy() *PostgresDatabaseStatus { + if in == nil { + return nil + } + out := new(PostgresDatabaseStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PremiumAppsProps) DeepCopyInto(out *PremiumAppsProps) { *out = *in diff --git a/bundle.Dockerfile b/bundle.Dockerfile index c16e98425..7a08487c3 100644 --- a/bundle.Dockerfile +++ b/bundle.Dockerfile @@ -5,11 +5,10 @@ LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ LABEL operators.operatorframework.io.bundle.package.v1=splunk-operator -LABEL operators.operatorframework.io.bundle.channels.v1=stable -LABEL operators.operatorframework.io.bundle.channel.default.v1: stable -LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.39.0 +LABEL operators.operatorframework.io.bundle.channels.v1=alpha +LABEL operators.operatorframework.io.metrics.builder=operator-sdk-v1.42.0 LABEL operators.operatorframework.io.metrics.mediatype.v1=metrics+v1 -LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v3 +LABEL operators.operatorframework.io.metrics.project_layout=go.kubebuilder.io/v4 # Labels for testing. LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 diff --git a/bundle/manifests/enterprise.splunk.com_clustermanagers.yaml b/bundle/manifests/enterprise.splunk.com_clustermanagers.yaml index caf564122..4f191f44a 100644 --- a/bundle/manifests/enterprise.splunk.com_clustermanagers.yaml +++ b/bundle/manifests/enterprise.splunk.com_clustermanagers.yaml @@ -651,8 +651,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1014,10 +1014,12 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when scope @@ -1042,6 +1044,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1053,8 +1057,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -1092,6 +1102,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1122,21 +1134,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. 
Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -1145,8 +1164,19 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' + enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object clusterManagerRef: @@ -1256,19 +1286,25 @@ spec: description: Storage configuration for /opt/splunk/etc volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' extraEnv: description: |- ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers @@ -1278,7 +1314,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. 
Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1336,6 +1374,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1394,12 +1469,13 @@ spec: environment variables) type: string imagePullPolicy: + default: IfNotPresent description: 'Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")' enum: - Always - - Never - IfNotPresent + - Never type: string imagePullSecrets: description: |- @@ -1521,6 +1597,11 @@ spec: minimum: 0 type: integer livenessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 30 description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command properties: failureThreshold: @@ -1597,6 +1678,11 @@ spec: minimum: 0 type: integer readinessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 description: ReadinessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes properties: failureThreshold: @@ -1629,7 +1715,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. 
@@ -2275,6 +2361,7 @@ spec: type: integer name: description: Splunk index name + minLength: 1 type: string remotePath: description: Index location relative to the remote volume @@ -2283,8 +2370,13 @@ spec: volumeName: description: Remote Volume name type: string + required: + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map volumes: description: List of remote storage volumes items: @@ -2292,21 +2384,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -2315,11 +2414,30 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map type: object startupProbe: + default: + failureThreshold: 12 + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 30 description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes properties: failureThreshold: @@ -2561,19 +2679,25 @@ spec: description: Storage configuration for /opt/splunk/var volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' volumes: description: List of one or more Kubernetes volumes. These will be mounted in all pod containers as as /mnt/ @@ -3248,15 +3372,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. 
This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3438,12 +3560,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3522,7 +3642,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3942,6 +4062,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. 
+ + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. 
+ + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4076,7 +4301,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -4386,11 +4610,13 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when @@ -4416,6 +4642,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4427,8 +4655,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -4467,6 +4701,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4497,21 +4733,29 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. + apps reside. Required for aws, optional for azure + and gcp. type: string secretRef: description: Secret object name @@ -4521,8 +4765,19 @@ spec: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object appSrcDeployStatus: @@ -4750,6 +5005,7 @@ spec: type: integer name: description: Splunk index name + minLength: 1 type: string remotePath: description: Index location relative to the remote volume @@ -4758,8 +5014,13 @@ spec: volumeName: description: Remote Volume name type: string + required: + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map volumes: description: List of remote storage volumes items: @@ -4767,21 +5028,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -4790,9 +5058,23 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map type: object telAppInstalled: description: Telemetry App installation flag diff --git a/bundle/manifests/enterprise.splunk.com_clustermasters.yaml b/bundle/manifests/enterprise.splunk.com_clustermasters.yaml index 24743e927..d4d231d0c 100644 --- a/bundle/manifests/enterprise.splunk.com_clustermasters.yaml +++ b/bundle/manifests/enterprise.splunk.com_clustermasters.yaml @@ -647,8 +647,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1010,10 +1010,12 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when scope @@ -1038,6 +1040,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1049,8 +1053,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -1088,6 +1098,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1118,21 +1130,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -1141,8 +1160,19 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object clusterManagerRef: @@ -1252,19 +1282,25 @@ spec: description: Storage configuration for /opt/splunk/etc volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' extraEnv: description: |- ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers @@ -1274,7 +1310,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1332,6 +1370,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. 
+ The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1390,12 +1465,13 @@ spec: environment variables) type: string imagePullPolicy: + default: IfNotPresent description: 'Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")' enum: - Always - - Never - IfNotPresent + - Never type: string imagePullSecrets: description: |- @@ -1517,6 +1593,11 @@ spec: minimum: 0 type: integer livenessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 30 description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command properties: failureThreshold: @@ -1593,6 +1674,11 @@ spec: minimum: 0 type: integer readinessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 description: ReadinessProbe as defined in 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes properties: failureThreshold: @@ -1625,7 +1711,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2271,6 +2357,7 @@ spec: type: integer name: description: Splunk index name + minLength: 1 type: string remotePath: description: Index location relative to the remote volume @@ -2279,8 +2366,13 @@ spec: volumeName: description: Remote Volume name type: string + required: + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map volumes: description: List of remote storage volumes items: @@ -2288,21 +2380,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -2311,11 +2410,30 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map type: object startupProbe: + default: + failureThreshold: 12 + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 30 description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes properties: failureThreshold: @@ -2557,19 +2675,25 @@ spec: description: Storage configuration for /opt/splunk/var volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' volumes: description: List of one or more Kubernetes volumes. These will be mounted in all pod containers as as /mnt/ @@ -3244,15 +3368,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. 
This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3434,12 +3556,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3518,7 +3638,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3938,6 +4058,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. 
+ + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. 
+ + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4072,7 +4297,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -4382,11 +4606,13 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when @@ -4412,6 +4638,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4423,8 +4651,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -4463,6 +4697,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4493,21 +4729,29 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. + apps reside. Required for aws, optional for azure + and gcp. type: string secretRef: description: Secret object name @@ -4517,8 +4761,19 @@ spec: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object appSrcDeployStatus: @@ -4743,6 +4998,7 @@ spec: type: integer name: description: Splunk index name + minLength: 1 type: string remotePath: description: Index location relative to the remote volume @@ -4751,8 +5007,13 @@ spec: volumeName: description: Remote Volume name type: string + required: + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map volumes: description: List of remote storage volumes items: @@ -4760,21 +5021,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -4783,9 +5051,23 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map type: object telAppInstalled: description: Telemetry App installation flag diff --git a/bundle/manifests/enterprise.splunk.com_indexerclusters.yaml b/bundle/manifests/enterprise.splunk.com_indexerclusters.yaml index 51682e450..86121b919 100644 --- a/bundle/manifests/enterprise.splunk.com_indexerclusters.yaml +++ b/bundle/manifests/enterprise.splunk.com_indexerclusters.yaml @@ -654,8 +654,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1104,19 +1104,25 @@ spec: description: Storage configuration for /opt/splunk/etc volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' extraEnv: description: |- ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers @@ -1126,7 +1132,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1184,6 +1192,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. 
+ type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1242,12 +1287,13 @@ spec: environment variables) type: string imagePullPolicy: + default: IfNotPresent description: 'Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")' enum: - Always - - Never - IfNotPresent + - Never type: string imagePullSecrets: description: |- @@ -1369,6 +1415,11 @@ spec: minimum: 0 type: integer livenessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 30 description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command properties: failureThreshold: @@ -1445,6 +1496,11 @@ spec: minimum: 0 type: integer readinessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 description: ReadinessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes properties: failureThreshold: @@ -1482,7 +1538,7 @@ spec: 
Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2056,6 +2112,11 @@ spec: type: object type: object startupProbe: + default: + failureThreshold: 12 + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 30 description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes properties: failureThreshold: @@ -2297,19 +2358,25 @@ spec: description: Storage configuration for /opt/splunk/var volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' volumes: description: List of one or more Kubernetes volumes. These will be mounted in all pod containers as as /mnt/ @@ -2984,15 +3051,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. 
This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3174,12 +3239,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3258,7 +3321,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3678,6 +3741,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. 
+ + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. 
+ + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3812,7 +3980,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -4840,8 +5007,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -5290,19 +5457,25 @@ spec: description: Storage configuration for /opt/splunk/etc volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageClassName) && size(self.storageClassName) > 0 + && self.ephemeralStorage == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageCapacity) && size(self.storageCapacity) > 0 + && self.ephemeralStorage == true)' extraEnv: description: |- ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers @@ -5312,7 +5485,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -5370,6 +5545,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters.
+ type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -5428,12 +5640,13 @@ spec: environment variables) type: string imagePullPolicy: + default: IfNotPresent description: 'Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")' enum: - Always - - Never - IfNotPresent + - Never type: string imagePullSecrets: description: |- @@ -5555,6 +5768,11 @@ spec: minimum: 0 type: integer livenessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 30 description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command properties: failureThreshold: @@ -5717,6 +5935,11 @@ spec: minimum: 0 type: integer readinessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 description: ReadinessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes properties: failureThreshold: @@ -5742,8 +5965,7 @@ spec: 
type: integer type: object replicas: - description: Number of search head pods; a search head cluster will - be created if > 1 + description: Number of indexer cluster peers format: int32 type: integer resources: @@ -5754,7 +5976,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -6328,6 +6550,11 @@ spec: type: object type: object startupProbe: + default: + failureThreshold: 12 + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 30 description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes properties: failureThreshold: @@ -6569,19 +6796,25 @@ spec: description: Storage configuration for /opt/splunk/var volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageClassName) && size(self.storageClassName) > 0 + && self.ephemeralStorage == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageCapacity) && size(self.storageCapacity) > 0 + && self.ephemeralStorage == true)' volumes: description: List of one or more Kubernetes volumes.
These will be mounted in all pod containers as as /mnt/ @@ -7256,15 +7489,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -7446,12 +7677,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. 
- More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -7530,7 +7759,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -7950,6 +8179,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. 
If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. 
+ type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8084,7 +8418,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- diff --git a/bundle/manifests/enterprise.splunk.com_ingestorclusters.yaml b/bundle/manifests/enterprise.splunk.com_ingestorclusters.yaml index 0481e4a83..1e359a3ac 100644 --- a/bundle/manifests/enterprise.splunk.com_ingestorclusters.yaml +++ b/bundle/manifests/enterprise.splunk.com_ingestorclusters.yaml @@ -647,8 +647,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1010,10 +1010,12 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when scope @@ -1038,6 +1040,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1049,8 +1053,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -1088,6 +1098,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1118,21 +1130,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. 
Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -1141,8 +1160,19 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' + enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: '!has(self.provider) || self.provider != ''aws'' || (has(self.region) && size(self.region) > 0)' type: array type: object clusterManagerRef: @@ -1252,19 +1282,25 @@ spec: description: Storage configuration for /opt/splunk/etc volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageClassName) && size(self.storageClassName) > 0 + && self.ephemeralStorage == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageCapacity) && size(self.storageCapacity) > 0 + && self.ephemeralStorage == true)' extraEnv: description: |- ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers @@ -1274,7 +1310,9 @@ spec: a Container. properties: name: - description: Name of the environment variable.
Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1332,6 +1370,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1390,12 +1465,13 @@ spec: environment variables) type: string imagePullPolicy: + default: IfNotPresent description: 'Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")' enum: - Always - - Never - IfNotPresent + - Never type: string imagePullSecrets: description: |- @@ -1517,6 +1593,11 @@ spec: minimum: 0 type: integer livenessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 30 description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command properties: failureThreshold: @@ -1679,6 +1760,11 @@ spec: minimum: 0 type: integer readinessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 description: ReadinessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes properties: failureThreshold: @@ -1717,7 +1803,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. 
@@ -2291,6 +2377,11 @@ spec: type: object type: object startupProbe: + default: + failureThreshold: 12 + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 30 description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes properties: failureThreshold: @@ -2532,19 +2623,25 @@ spec: description: Storage configuration for /opt/splunk/var volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageClassName) && size(self.storageClassName) > 0 + && self.ephemeralStorage == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageCapacity) && size(self.storageCapacity) > 0 + && self.ephemeralStorage == true)' volumes: description: List of one or more Kubernetes volumes. These will be mounted in all pod containers as as /mnt/ @@ -3219,15 +3316,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
- If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3409,12 +3504,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3493,7 +3586,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3913,6 +4006,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. 
+ + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. 
+ + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4047,7 +4245,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -4366,11 +4563,13 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when @@ -4396,6 +4595,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4407,8 +4608,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -4447,6 +4654,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4477,21 +4686,29 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. + apps reside. Required for aws, optional for azure + and gcp. type: string secretRef: description: Secret object name @@ -4501,8 +4718,19 @@ spec: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object appSrcDeployStatus: diff --git a/bundle/manifests/enterprise.splunk.com_licensemanagers.yaml b/bundle/manifests/enterprise.splunk.com_licensemanagers.yaml index 25f8b3e1b..889c044b4 100644 --- a/bundle/manifests/enterprise.splunk.com_licensemanagers.yaml +++ b/bundle/manifests/enterprise.splunk.com_licensemanagers.yaml @@ -641,8 +641,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1004,10 +1004,12 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when scope @@ -1032,6 +1034,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1043,8 +1047,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -1082,6 +1092,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1112,21 +1124,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -1135,8 +1154,19 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object clusterManagerRef: @@ -1246,19 +1276,25 @@ spec: description: Storage configuration for /opt/splunk/etc volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' extraEnv: description: |- ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers @@ -1268,7 +1304,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1326,6 +1364,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. 
+ The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1384,12 +1459,13 @@ spec: environment variables) type: string imagePullPolicy: + default: IfNotPresent description: 'Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")' enum: - Always - - Never - IfNotPresent + - Never type: string imagePullSecrets: description: |- @@ -1511,6 +1587,11 @@ spec: minimum: 0 type: integer livenessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 30 description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command properties: failureThreshold: @@ -1587,6 +1668,11 @@ spec: minimum: 0 type: integer readinessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 description: ReadinessProbe as defined in 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes properties: failureThreshold: @@ -1619,7 +1705,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2193,6 +2279,11 @@ spec: type: object type: object startupProbe: + default: + failureThreshold: 12 + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 30 description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes properties: failureThreshold: @@ -2434,19 +2525,25 @@ spec: description: Storage configuration for /opt/splunk/var volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' volumes: description: List of one or more Kubernetes volumes. 
These will be mounted in all pod containers as as /mnt/ @@ -3121,15 +3218,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3311,12 +3406,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. 
- More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3395,7 +3488,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3815,6 +3908,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. 
+ If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation.
+ type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3949,7 +4147,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -4260,11 +4457,13 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when @@ -4290,6 +4489,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4301,8 +4502,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -4341,6 +4548,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4371,21 +4580,29 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. + apps reside. Required for aws, optional for azure + and gcp. type: string secretRef: description: Secret object name @@ -4395,8 +4612,19 @@ spec: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object appSrcDeployStatus: diff --git a/bundle/manifests/enterprise.splunk.com_licensemasters.yaml b/bundle/manifests/enterprise.splunk.com_licensemasters.yaml index 4687c7109..ab649b9d0 100644 --- a/bundle/manifests/enterprise.splunk.com_licensemasters.yaml +++ b/bundle/manifests/enterprise.splunk.com_licensemasters.yaml @@ -636,8 +636,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -999,10 +999,12 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when scope @@ -1027,6 +1029,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1038,8 +1042,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -1077,6 +1087,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1107,21 +1119,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -1130,8 +1149,19 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object clusterManagerRef: @@ -1241,19 +1271,25 @@ spec: description: Storage configuration for /opt/splunk/etc volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' extraEnv: description: |- ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers @@ -1263,7 +1299,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1321,6 +1359,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. 
+ The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1379,12 +1454,13 @@ spec: environment variables) type: string imagePullPolicy: + default: IfNotPresent description: 'Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")' enum: - Always - - Never - IfNotPresent + - Never type: string imagePullSecrets: description: |- @@ -1506,6 +1582,11 @@ spec: minimum: 0 type: integer livenessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 30 description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command properties: failureThreshold: @@ -1582,6 +1663,11 @@ spec: minimum: 0 type: integer readinessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 description: ReadinessProbe as defined in 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes properties: failureThreshold: @@ -1614,7 +1700,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2188,6 +2274,11 @@ spec: type: object type: object startupProbe: + default: + failureThreshold: 12 + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 30 description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes properties: failureThreshold: @@ -2429,19 +2520,25 @@ spec: description: Storage configuration for /opt/splunk/var volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' volumes: description: List of one or more Kubernetes volumes. 
These will be mounted in all pod containers as as /mnt/ @@ -3116,15 +3213,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3306,12 +3401,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. 
- More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3390,7 +3483,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3810,6 +3903,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. 
If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. 
+ type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3944,7 +4142,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -4255,11 +4452,13 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when @@ -4285,6 +4484,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4296,8 +4497,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -4336,6 +4543,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4366,21 +4575,29 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. + apps reside. Required for aws, optional for azure + and gcp. type: string secretRef: description: Secret object name @@ -4390,8 +4607,19 @@ spec: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: '!has(self.provider) || self.provider != ''aws'' || (has(self.region) && size(self.region) > 0)' type: array type: object appSrcDeployStatus: diff --git a/bundle/manifests/enterprise.splunk.com_monitoringconsoles.yaml b/bundle/manifests/enterprise.splunk.com_monitoringconsoles.yaml index c8954a274..7d5487622 100644 --- a/bundle/manifests/enterprise.splunk.com_monitoringconsoles.yaml +++ b/bundle/manifests/enterprise.splunk.com_monitoringconsoles.yaml @@ -643,8 +643,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1006,10 +1012,12 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when scope @@ -1034,6 +1036,8 @@ type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1045,8 +1049,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -1084,6 +1094,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1114,21 +1126,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -1137,8 +1156,19 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: '!has(self.provider) || self.provider != ''aws'' || (has(self.region) && size(self.region) > 0)' type: array type: object clusterManagerRef: @@ -1248,19 +1278,25 @@ spec: description: Storage configuration for /opt/splunk/etc volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageClassName) && size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageCapacity) && size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' extraEnv: description: |- ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers @@ -1270,7 +1306,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1328,6 +1366,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. 
+ The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1386,12 +1461,13 @@ spec: environment variables) type: string imagePullPolicy: + default: IfNotPresent description: 'Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")' enum: - Always - - Never - IfNotPresent + - Never type: string imagePullSecrets: description: |- @@ -1513,6 +1589,11 @@ spec: minimum: 0 type: integer livenessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 30 description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command properties: failureThreshold: @@ -1589,6 +1670,11 @@ spec: minimum: 0 type: integer readinessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 description: ReadinessProbe as defined in 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes properties: failureThreshold: @@ -1621,7 +1707,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2195,6 +2281,11 @@ spec: type: object type: object startupProbe: + default: + failureThreshold: 12 + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 30 description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes properties: failureThreshold: @@ -2436,19 +2527,25 @@ spec: description: Storage configuration for /opt/splunk/var volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageClassName) && size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageCapacity) && size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' volumes: description: List of one or more Kubernetes volumes. 
These will be mounted in all pod containers as as /mnt/ @@ -3123,15 +3220,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3313,12 +3408,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. 
- More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3397,7 +3490,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3817,6 +3910,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. 
If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. 
+ type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3951,7 +4149,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -4261,11 +4458,13 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when @@ -4291,6 +4490,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4302,8 +4503,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -4342,6 +4549,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4372,21 +4581,29 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. + apps reside. Required for aws, optional for azure + and gcp. type: string secretRef: description: Secret object name @@ -4396,8 +4613,19 @@ spec: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' + enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: '!has(self.provider) || self.provider != ''aws'' || (has(self.region) && size(self.region) > 0)' type: array type: object appSrcDeployStatus: @@ -5174,8 +5402,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -5537,10 +5765,12 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when scope @@ -5565,6 +5795,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -5576,8 +5808,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -5615,6 +5853,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -5645,21 +5885,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. 
Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -5668,8 +5915,19 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' + enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: '!has(self.provider) || self.provider != ''aws'' || (has(self.region) && size(self.region) > 0)' type: array type: object clusterManagerRef: @@ -5779,19 +6037,25 @@ spec: description: Storage configuration for /opt/splunk/etc volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageClassName) && size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageCapacity) && size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' extraEnv: description: |- ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers @@ -5801,7 +6065,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. 
Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -5859,6 +6125,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -5917,12 +6220,13 @@ spec: environment variables) type: string imagePullPolicy: + default: IfNotPresent description: 'Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")' enum: - Always - - Never - IfNotPresent + - Never type: string imagePullSecrets: description: |- @@ -6044,6 +6348,11 @@ spec: minimum: 0 type: integer livenessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 30 description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command properties: failureThreshold: @@ -6120,6 +6429,11 @@ spec: minimum: 0 type: integer readinessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 description: ReadinessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes properties: failureThreshold: @@ -6152,7 +6466,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. 
@@ -6726,6 +7040,11 @@ spec: type: object type: object startupProbe: + default: + failureThreshold: 12 + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 30 description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes properties: failureThreshold: @@ -6967,19 +7286,25 @@ spec: description: Storage configuration for /opt/splunk/var volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageClassName) && size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(has(self.storageCapacity) && size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' volumes: description: List of one or more Kubernetes volumes. These will be mounted in all pod containers as as /mnt/ @@ -7654,15 +7979,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
- If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -7844,12 +8167,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -7928,7 +8249,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -8348,6 +8669,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. 
+ + The named signer chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate.
+ + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8482,7 +8908,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -8792,11 +9217,13 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when @@ -8822,6 +9249,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -8833,8 +9262,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -8873,6 +9308,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -8903,21 +9340,29 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. + apps reside. Required for aws, optional for azure + and gcp. type: string secretRef: description: Secret object name @@ -8927,8 +9372,19 @@ spec: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: '!has(self.provider) || self.provider != ''aws'' || (has(self.region) + && size(self.region) > 0)' type: array type: object appSrcDeployStatus: diff --git a/bundle/manifests/enterprise.splunk.com_postgresclusterclasses.yaml b/bundle/manifests/enterprise.splunk.com_postgresclusterclasses.yaml new file mode 100644 index 000000000..9c9d19e35 --- /dev/null +++ b/bundle/manifests/enterprise.splunk.com_postgresclusterclasses.yaml @@ -0,0 +1,334 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + creationTimestamp: null + labels: + name: splunk-operator + name: postgresclusterclasses.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: PostgresClusterClass + listKind: PostgresClusterClassList + plural: postgresclusterclasses + singular: postgresclusterclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.provisioner + name: Provisioner + type: string + - jsonPath: .spec.config.instances + name: Instances + type: integer + - jsonPath: .spec.config.storage + name: Storage + type: string + - jsonPath: .spec.config.postgresVersion + name: Version + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v4 + schema: + openAPIV3Schema: + description: |- + PostgresClusterClass is the Schema for the postgresclusterclasses API. + PostgresClusterClass defines a reusable template and policy for postgres cluster provisioning. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + PostgresClusterClassSpec defines the desired state of PostgresClusterClass. + PostgresClusterClass is immutable after creation - it serves as a template for Cluster CRs. + properties: + cnpg: + description: |- + CNPG contains CloudNativePG-specific configuration and policies. + Only used when Provisioner is "postgresql.cnpg.io" + These settings CANNOT be overridden in PostgresCluster CR (platform policy). + properties: + connectionPooler: + description: |- + ConnectionPooler contains PgBouncer connection pooler configuration. + When enabled, creates RW and RO pooler deployments for clusters using this class. + properties: + config: + additionalProperties: + type: string + description: |- + Config contains PgBouncer configuration parameters. + Passed directly to CNPG Pooler spec.pgbouncer.parameters. + See: https://cloudnative-pg.io/docs/1.28/connection_pooling/#pgbouncer-configuration-options + type: object + instances: + default: 3 + description: |- + Instances is the number of PgBouncer pod replicas. + Higher values provide better availability and load distribution. + format: int32 + maximum: 10 + minimum: 1 + type: integer + mode: + default: transaction + description: Mode defines the connection pooling strategy. + enum: + - session + - transaction + - statement + type: string + type: object + primaryUpdateMethod: + default: switchover + description: |- + PrimaryUpdateMethod determines how the primary instance is updated. 
+ "restart" - tolerate brief downtime (suitable for development) + "switchover" - minimal downtime via automated failover (production-grade) + + NOTE: When using "switchover", ensure clusterConfig.instances > 1. + Switchover requires at least one replica to fail over to. + enum: + - restart + - switchover + type: string + type: object + config: + default: {} + description: |- + PostgresClusterConfig contains cluster-level configuration. + These settings apply to PostgresCluster infrastructure. + Can be overridden in PostgresCluster CR. + properties: + connectionPoolerEnabled: + default: false + description: |- + ConnectionPoolerEnabled controls whether PgBouncer connection pooling is deployed. + When true, creates RW and RO pooler deployments for clusters using this class. + Can be overridden in PostgresCluster CR. + type: boolean + instances: + default: 1 + description: |- + Instances is the number of database instances (1 primary + N replicas). + Single instance (1) is suitable for development. + High availability requires at least 3 instances (1 primary + 2 replicas). + format: int32 + maximum: 10 + minimum: 1 + type: integer + pgHBA: + description: |- + PgHBA contains pg_hba.conf host-based authentication rules. + Defines client authentication and connection security (cluster-wide). + Example: ["hostssl all all 0.0.0.0/0 scram-sha-256"] + items: + type: string + type: array + postgresVersion: + default: "18" + description: |- + PostgresVersion is the PostgreSQL version (major or major.minor). + Examples: "18" (latest 18.x), "18.1" (specific minor), "17", "16" + pattern: ^[0-9]+(\.[0-9]+)?$ + type: string + postgresqlConfig: + additionalProperties: + type: string + description: |- + PostgreSQLConfig contains PostgreSQL engine configuration parameters. + Maps to postgresql.conf settings (cluster-wide). 
+ Example: {"max_connections": "200", "shared_buffers": "2GB"} + type: object + resources: + description: |- + Resources defines CPU and memory requests/limits per instance. + All instances in the cluster have the same resources. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + storage: + anyOf: + - type: integer + - type: string + default: 50Gi + description: |- + Storage is the size of persistent volume for each instance. + Cannot be decreased after cluster creation (PostgreSQL limitation). + Recommended minimum: 10Gi for production viability. + Example: "50Gi", "100Gi", "1Ti" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + provisioner: + description: |- + Provisioner identifies which database provisioner to use. + Currently supported: "postgresql.cnpg.io" (CloudNativePG) + enum: + - postgresql.cnpg.io + type: string + required: + - provisioner + type: object + x-kubernetes-validations: + - message: cnpg config can only be set when provisioner is postgresql.cnpg.io + rule: '!has(self.cnpg) || self.provisioner == ''postgresql.cnpg.io''' + - message: cnpg.connectionPooler must be set when config.connectionPoolerEnabled + is true + rule: '!has(self.config) || !has(self.config.connectionPoolerEnabled) + || !self.config.connectionPoolerEnabled || (has(self.cnpg) && has(self.cnpg.connectionPooler))' + status: + description: PostgresClusterClassStatus defines the observed state of + PostgresClusterClass. + properties: + conditions: + description: Conditions represent the latest available observations + of the PostgresClusterClass state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. 
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + phase: + description: |- + Phase represents the current phase of the PostgresClusterClass. 
+ Valid phases: "Ready", "Invalid" + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/enterprise.splunk.com_postgresclusters.yaml b/bundle/manifests/enterprise.splunk.com_postgresclusters.yaml new file mode 100644 index 000000000..abc6ddfd0 --- /dev/null +++ b/bundle/manifests/enterprise.splunk.com_postgresclusters.yaml @@ -0,0 +1,477 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + creationTimestamp: null + labels: + name: splunk-operator + name: postgresclusters.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: PostgresCluster + listKind: PostgresClusterList + plural: postgresclusters + singular: postgrescluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.class + name: Class + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v4 + schema: + openAPIV3Schema: + description: PostgresCluster is the Schema for the postgresclusters API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + PostgresClusterSpec defines the desired state of PostgresCluster. + Validation rules ensure immutability of Class, and that Storage and PostgresVersion can only be set once and cannot be removed or downgraded. + properties: + class: + description: This field is IMMUTABLE after creation. + minLength: 1 + type: string + x-kubernetes-validations: + - message: class is immutable + rule: self == oldSelf + clusterDeletionPolicy: + default: Retain + description: ClusterDeletionPolicy controls the deletion behavior + of the underlying CNPG Cluster when the PostgresCluster is deleted. + enum: + - Delete + - Retain + type: string + connectionPoolerConfig: + description: Only takes effect when connection pooling is enabled. + properties: + config: + additionalProperties: + type: string + description: |- + Config contains PgBouncer configuration parameters. + Passed directly to CNPG Pooler spec.pgbouncer.parameters. + See: https://cloudnative-pg.io/docs/1.28/connection_pooling/#pgbouncer-configuration-options + type: object + instances: + default: 3 + description: |- + Instances is the number of PgBouncer pod replicas. + Higher values provide better availability and load distribution. + format: int32 + maximum: 10 + minimum: 1 + type: integer + mode: + default: transaction + description: Mode defines the connection pooling strategy. + enum: + - session + - transaction + - statement + type: string + type: object + connectionPoolerEnabled: + default: false + description: |- + ConnectionPoolerEnabled controls whether PgBouncer connection pooling is deployed for this cluster. + When set, takes precedence over the class-level connectionPoolerEnabled value. + type: boolean + instances: + description: Instances overrides the number of PostgreSQL instances + from ClusterClass. 
+ format: int32 + maximum: 10 + minimum: 1 + type: integer + managedRoles: + description: |- + ManagedRoles contains PostgreSQL roles that should be created in the cluster. + This field supports Server-Side Apply with per-role granularity, allowing + multiple PostgresDatabase controllers to manage different roles independently. + items: + description: ManagedRole represents a PostgreSQL role to be created + and managed in the cluster. + properties: + exists: + default: true + description: Exists controls whether the role should be present + (true) or absent (false) in PostgreSQL. + type: boolean + name: + description: Name of the role/user to create. + maxLength: 63 + minLength: 1 + type: string + passwordSecretRef: + description: PasswordSecretRef references a Secret and the key + within it containing the password for this role. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + pgHBA: + default: [] + description: |- + PgHBA contains pg_hba.conf host-based authentication rules. + Defines client authentication and connection security (cluster-wide). + Maps to pg_hba.conf settings. + Default empty array prevents panic. 
+ Example: ["hostssl all all 0.0.0.0/0 scram-sha-256"] + items: + type: string + type: array + postgresVersion: + description: |- + PostgresVersion is the PostgreSQL version (major or major.minor). + Examples: "18" (latest 18.x), "18.1" (specific minor), "17", "16" + pattern: ^[0-9]+(\.[0-9]+)?$ + type: string + postgresqlConfig: + additionalProperties: + type: string + default: {} + description: |- + PostgreSQL overrides PostgreSQL engine parameters from ClusterClass. + Maps to postgresql.conf settings. + Default empty map prevents panic. + Example: {"shared_buffers": "128MB", "log_min_duration_statement": "500ms"} + type: object + resources: + description: Resources overrides CPU/memory resources from ClusterClass. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + storage: + anyOf: + - type: integer + - type: string + description: |- + Storage overrides the storage size from ClusterClass. + Example: "5Gi" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - class + type: object + x-kubernetes-validations: + - messageExpression: '!has(self.postgresVersion) ? ''postgresVersion cannot + be removed once set (was: '' + oldSelf.postgresVersion + '')'' : ''postgresVersion + major version cannot be downgraded (from: '' + oldSelf.postgresVersion + + '', to: '' + self.postgresVersion + '')''' + rule: '!has(oldSelf.postgresVersion) || (has(self.postgresVersion) && + int(self.postgresVersion.split(''.'')[0]) >= int(oldSelf.postgresVersion.split(''.'')[0]))' + - messageExpression: '!has(self.storage) ? 
''storage cannot be removed + once set (was: '' + string(oldSelf.storage) + '')'' : ''storage size + cannot be decreased (from: '' + string(oldSelf.storage) + '', to: + '' + string(self.storage) + '')''' + rule: '!has(oldSelf.storage) || (has(self.storage) && quantity(self.storage).compareTo(quantity(oldSelf.storage)) + >= 0)' + - message: connectionPoolerConfig cannot be overridden on PostgresCluster + rule: '!has(self.connectionPoolerConfig)' + status: + description: PostgresClusterStatus defines the observed state of PostgresCluster. + properties: + conditions: + description: Conditions represent the latest available observations + of the PostgresCluster's state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. 
+ This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + connectionPoolerStatus: + description: |- + ConnectionPoolerStatus contains the observed state of the connection pooler. + Only populated when connection pooler is enabled in the PostgresClusterClass. + properties: + enabled: + description: Enabled indicates whether pooler is active for this + cluster. + type: boolean + type: object + managedRolesStatus: + description: ManagedRolesStatus tracks the reconciliation status of + managed roles. + properties: + failed: + additionalProperties: + type: string + description: Failed contains roles that failed to reconcile with + error messages. + type: object + pending: + description: Pending contains roles that are being created but + not yet ready. + items: + type: string + type: array + reconciled: + description: Reconciled contains roles that have been successfully + created and are ready. + items: + type: string + type: array + type: object + phase: + description: |- + Phase represents the current phase of the PostgresCluster. + Values: "Pending", "Provisioning", "Failed", "Ready", "Deleting" + type: string + provisionerRef: + description: |- + ProvisionerRef contains reference to the provisioner resource managing this PostgresCluster. + Right now, only CNPG is supported. + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + resources: + description: Resources contains references to related Kubernetes resources + like ConfigMaps and Secrets. + properties: + configMapRef: + description: |- + ConfigMapRef references the ConfigMap with connection endpoints. + Contains: CLUSTER_ENDPOINTS, POOLER_ENDPOINTS (if connection pooler enabled) + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + secretRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/enterprise.splunk.com_postgresdatabases.yaml b/bundle/manifests/enterprise.splunk.com_postgresdatabases.yaml new file mode 100644 index 000000000..badbc70b8 --- /dev/null +++ b/bundle/manifests/enterprise.splunk.com_postgresdatabases.yaml @@ -0,0 +1,267 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + creationTimestamp: null + labels: + name: splunk-operator + name: postgresdatabases.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: PostgresDatabase + listKind: PostgresDatabaseList + plural: postgresdatabases + singular: postgresdatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + 
- jsonPath: .spec.clusterRef.name + name: Cluster + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v4 + schema: + openAPIV3Schema: + description: PostgresDatabase is the Schema for the postgresdatabases API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PostgresDatabaseSpec defines the desired state of PostgresDatabase. + properties: + clusterRef: + description: Reference to Postgres Cluster managed by postgresCluster + controller + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + databases: + items: + properties: + deletionPolicy: + default: Delete + enum: + - Delete + - Retain + type: string + extensions: + items: + type: string + type: array + name: + maxLength: 30 + type: string + required: + - name + type: object + maxItems: 10 + minItems: 1 + type: array + x-kubernetes-validations: + - message: database names must be unique + rule: self.all(x, self.filter(y, y.name == x.name).size() == 1) + required: + - clusterRef + - databases + type: object + x-kubernetes-validations: + - message: clusterRef is immutable + rule: self.clusterRef == oldSelf.clusterRef + status: + description: PostgresDatabaseStatus defines the observed state of PostgresDatabase. + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + databases: + items: + properties: + adminUserSecretRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + configMap: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + databaseRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + name: + type: string + ready: + type: boolean + rwUserSecretRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: array + observedGeneration: + description: ObservedGeneration represents the .metadata.generation + that the status was set based upon. 
+ format: int64 + type: integer + phase: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/enterprise.splunk.com_queues.yaml b/bundle/manifests/enterprise.splunk.com_queues.yaml index 90dca1f99..8939f7d05 100644 --- a/bundle/manifests/enterprise.splunk.com_queues.yaml +++ b/bundle/manifests/enterprise.splunk.com_queues.yaml @@ -84,34 +84,20 @@ spec: volumes: description: List of remote storage volumes items: - description: VolumeSpec defines remote volume config + description: SQSVolumeSpec defines a volume reference for SQS + queue authentication properties: - endpoint: - description: Remote volume URI - type: string name: description: Remote volume name - type: string - path: - description: Remote volume path - type: string - provider: - description: 'App Package Remote Store provider. Supported - values: aws, minio, azure, gcp.' - type: string - region: - description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + minLength: 1 type: string secretRef: - description: Secret object name - type: string - storageType: - description: 'Remote Storage type. Supported values: s3, - blob, gcs. s3 works with aws or minio providers, whereas - blob works with azure provider, gcs works for gcp.' + description: Remote volume secret ref + minLength: 1 type: string + required: + - name + - secretRef type: object type: array required: diff --git a/bundle/manifests/enterprise.splunk.com_searchheadclusters.yaml b/bundle/manifests/enterprise.splunk.com_searchheadclusters.yaml index 4c9359abe..d87e80b39 100644 --- a/bundle/manifests/enterprise.splunk.com_searchheadclusters.yaml +++ b/bundle/manifests/enterprise.splunk.com_searchheadclusters.yaml @@ -649,8 +649,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1012,10 +1012,12 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when scope @@ -1040,6 +1042,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1051,8 +1055,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -1090,6 +1100,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1120,21 +1132,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. 
Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -1143,8 +1162,19 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' + enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object clusterManagerRef: @@ -1254,19 +1284,25 @@ spec: description: Storage configuration for /opt/splunk/etc volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' extraEnv: description: |- ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers @@ -1276,7 +1312,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. 
Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1334,6 +1372,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. 
+ type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1392,12 +1467,13 @@ spec: environment variables) type: string imagePullPolicy: + default: IfNotPresent description: 'Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")' enum: - Always - - Never - IfNotPresent + - Never type: string imagePullSecrets: description: |- @@ -1519,6 +1595,11 @@ spec: minimum: 0 type: integer livenessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 30 description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command properties: failureThreshold: @@ -1595,6 +1676,11 @@ spec: minimum: 0 type: integer readinessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 description: ReadinessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes properties: failureThreshold: @@ -1632,7 +1718,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. 
@@ -2206,6 +2292,11 @@ spec: type: object type: object startupProbe: + default: + failureThreshold: 12 + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 30 description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes properties: failureThreshold: @@ -2447,19 +2538,25 @@ spec: description: Storage configuration for /opt/splunk/var volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' volumes: description: List of one or more Kubernetes volumes. These will be mounted in all pod containers as as /mnt/ @@ -3134,15 +3231,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. 
- If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3324,12 +3419,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3408,7 +3501,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3828,6 +3921,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. 
+ + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. 
+ + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3962,7 +4160,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -4284,11 +4481,13 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when @@ -4314,6 +4513,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4325,8 +4526,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -4365,6 +4572,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4395,21 +4604,29 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. + apps reside. Required for aws, optional for azure + and gcp. type: string secretRef: description: Secret object name @@ -4419,8 +4636,19 @@ spec: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' + enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object appSrcDeployStatus: @@ -5267,8 +5495,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -5630,10 +5858,12 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when scope @@ -5658,6 +5888,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -5669,8 +5901,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -5708,6 +5946,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -5738,21 +5978,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. 
Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -5761,8 +6008,19 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' + enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object clusterManagerRef: @@ -6074,7 +6332,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. 
@@ -6129,19 +6387,25 @@ spec: description: Storage configuration for /opt/splunk/etc volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' extraEnv: description: |- ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers @@ -6151,7 +6415,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -6209,6 +6475,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. 
If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -6267,12 +6570,13 @@ spec: environment variables) type: string imagePullPolicy: + default: IfNotPresent description: 'Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")' enum: - Always - - Never - IfNotPresent + - Never type: string imagePullSecrets: description: |- @@ -6394,6 +6698,11 @@ spec: minimum: 0 type: integer livenessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 30 description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command properties: failureThreshold: @@ -6470,6 +6779,11 @@ spec: minimum: 0 type: integer readinessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 description: ReadinessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes properties: failureThreshold: @@ -6495,6 +6809,7 @@ spec: type: integer type: object replicas: + default: 3 description: Number of search head pods; a search head cluster will be 
created if > 1 format: int32 @@ -6507,7 +6822,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -7081,6 +7396,11 @@ spec: type: object type: object startupProbe: + default: + failureThreshold: 12 + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 30 description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes properties: failureThreshold: @@ -7322,19 +7642,25 @@ spec: description: Storage configuration for /opt/splunk/var volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' volumes: description: List of one or more Kubernetes volumes. These will be mounted in all pod containers as as /mnt/ @@ -8009,15 +8335,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. 
If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -8199,12 +8523,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. 
type: string path: description: |- @@ -8283,7 +8605,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -8703,6 +9025,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. 
If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. 
+ + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8837,7 +9264,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -9159,11 +9585,13 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when @@ -9189,6 +9617,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -9200,8 +9630,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -9240,6 +9676,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -9270,21 +9708,29 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. + apps reside. Required for aws, optional for azure + and gcp. type: string secretRef: description: Secret object name @@ -9294,8 +9740,19 @@ spec: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object appSrcDeployStatus: diff --git a/bundle/manifests/enterprise.splunk.com_standalones.yaml b/bundle/manifests/enterprise.splunk.com_standalones.yaml index 60c549249..16457a549 100644 --- a/bundle/manifests/enterprise.splunk.com_standalones.yaml +++ b/bundle/manifests/enterprise.splunk.com_standalones.yaml @@ -644,8 +644,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1007,10 +1007,12 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when scope @@ -1035,6 +1037,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1046,8 +1050,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -1085,6 +1095,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -1115,21 +1127,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -1138,8 +1157,19 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object clusterManagerRef: @@ -1249,19 +1279,25 @@ spec: description: Storage configuration for /opt/splunk/etc volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' extraEnv: description: |- ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers @@ -1271,7 +1307,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1329,6 +1367,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. 
+ The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1387,12 +1462,13 @@ spec: environment variables) type: string imagePullPolicy: + default: IfNotPresent description: 'Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")' enum: - Always - - Never - IfNotPresent + - Never type: string imagePullSecrets: description: |- @@ -1514,6 +1590,11 @@ spec: minimum: 0 type: integer livenessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 30 description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command properties: failureThreshold: @@ -1590,6 +1671,11 @@ spec: minimum: 0 type: integer readinessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 description: ReadinessProbe as defined in 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes properties: failureThreshold: @@ -1626,7 +1712,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -2272,6 +2358,7 @@ spec: type: integer name: description: Splunk index name + minLength: 1 type: string remotePath: description: Index location relative to the remote volume @@ -2280,8 +2367,13 @@ spec: volumeName: description: Remote Volume name type: string + required: + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map volumes: description: List of remote storage volumes items: @@ -2289,21 +2381,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -2312,11 +2411,30 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map type: object startupProbe: + default: + failureThreshold: 12 + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 30 description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes properties: failureThreshold: @@ -2558,19 +2676,25 @@ spec: description: Storage configuration for /opt/splunk/var volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' volumes: description: List of one or more Kubernetes volumes. These will be mounted in all pod containers as as /mnt/ @@ -3245,15 +3369,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. 
This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3435,12 +3557,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3519,7 +3639,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3939,6 +4059,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. 
+ + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. 
+ + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4073,7 +4298,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -4384,11 +4608,13 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when @@ -4414,6 +4640,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4425,8 +4653,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -4465,6 +4699,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -4495,21 +4731,29 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. + apps reside. Required for aws, optional for azure + and gcp. type: string secretRef: description: Secret object name @@ -4519,8 +4763,19 @@ spec: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object appSrcDeployStatus: @@ -4742,6 +4997,7 @@ spec: type: integer name: description: Splunk index name + minLength: 1 type: string remotePath: description: Index location relative to the remote volume @@ -4750,8 +5006,13 @@ spec: volumeName: description: Remote Volume name type: string + required: + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map volumes: description: List of remote storage volumes items: @@ -4759,21 +5020,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -4782,9 +5050,23 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map type: object telAppInstalled: description: Telemetry App installation flag @@ -5419,8 +5701,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -5782,10 +6064,12 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when scope @@ -5810,6 +6094,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -5821,8 +6107,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. 
@@ -5860,6 +6152,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -5890,21 +6184,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -5913,8 +6214,19 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object clusterManagerRef: @@ -6024,19 +6336,25 @@ spec: description: Storage configuration for /opt/splunk/etc volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' extraEnv: description: |- ExtraEnv refers to extra environment variables to be passed to the Splunk instance containers @@ -6046,7 +6364,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -6104,6 +6424,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. 
+ The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -6162,12 +6519,13 @@ spec: environment variables) type: string imagePullPolicy: + default: IfNotPresent description: 'Sets pull policy for all images ("Always", "Never", or the default: "IfNotPresent")' enum: - Always - - Never - IfNotPresent + - Never type: string imagePullSecrets: description: |- @@ -6289,6 +6647,11 @@ spec: minimum: 0 type: integer livenessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 30 + periodSeconds: 30 + timeoutSeconds: 30 description: LivenessProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-a-liveness-command properties: failureThreshold: @@ -6365,6 +6728,11 @@ spec: minimum: 0 type: integer readinessProbe: + default: + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 description: ReadinessProbe as defined in 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes properties: failureThreshold: @@ -6390,6 +6758,7 @@ spec: type: integer type: object replicas: + default: 1 description: Number of standalone pods format: int32 type: integer @@ -6401,7 +6770,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -7047,6 +7416,7 @@ spec: type: integer name: description: Splunk index name + minLength: 1 type: string remotePath: description: Index location relative to the remote volume @@ -7055,8 +7425,13 @@ spec: volumeName: description: Remote Volume name type: string + required: + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map volumes: description: List of remote storage volumes items: @@ -7064,21 +7439,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -7087,11 +7469,30 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map type: object startupProbe: + default: + failureThreshold: 12 + initialDelaySeconds: 40 + periodSeconds: 30 + timeoutSeconds: 30 description: StartupProbe as defined in https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes properties: failureThreshold: @@ -7333,19 +7734,25 @@ spec: description: Storage configuration for /opt/splunk/var volume properties: ephemeralStorage: - description: |- - If true, ephemeral (emptyDir) storage will be used - default false + default: false + description: If true, ephemeral (emptyDir) storage will be used type: boolean storageCapacity: description: Storage capacity to request persistent volume claims - (default=”10Gi” for etc and "100Gi" for var) + (default="10Gi" for etc and "100Gi" for var) type: string storageClassName: description: Name of StorageClass to use for persistent volume claims type: string type: object + x-kubernetes-validations: + - message: storageClassName and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageClassName) > 0 && self.ephemeralStorage + == true)' + - message: storageCapacity and ephemeralStorage are mutually exclusive + rule: '!(size(self.storageCapacity) > 0 && self.ephemeralStorage + == true)' volumes: description: List of one or more Kubernetes volumes. These will be mounted in all pod containers as as /mnt/ @@ -8020,15 +8427,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. 
This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -8210,12 +8615,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -8294,7 +8697,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -8714,6 +9117,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. 
+ + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. 
+ + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8848,7 +9356,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -9159,11 +9666,13 @@ spec: properties: location: description: Location relative to the volume path + minLength: 1 type: string name: description: Logical name for the set of apps placed in this location. Logical name must be unique to the appRepo + minLength: 1 type: string premiumAppsProps: description: Properties for premium apps, fill in when @@ -9189,6 +9698,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. 
later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -9200,8 +9711,14 @@ spec: volumeName: description: Remote Storage Volume name type: string + required: + - location + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map appsRepoPollIntervalSeconds: description: |- Interval in seconds to check the Remote Storage for App changes. @@ -9240,6 +9757,8 @@ spec: type: description: 'Type: enterpriseSecurity for now, can accommodate itsi etc.. later' + enum: + - enterpriseSecurity type: string type: object scope: @@ -9270,21 +9789,29 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. + apps reside. Required for aws, optional for azure + and gcp. type: string secretRef: description: Secret object name @@ -9294,8 +9821,19 @@ spec: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array type: object appSrcDeployStatus: @@ -9520,6 +10058,7 @@ spec: type: integer name: description: Splunk index name + minLength: 1 type: string remotePath: description: Index location relative to the remote volume @@ -9528,8 +10067,13 @@ spec: volumeName: description: Remote Volume name type: string + required: + - name type: object type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map volumes: description: List of remote storage volumes items: @@ -9537,21 +10081,28 @@ spec: properties: endpoint: description: Remote volume URI + minLength: 1 type: string name: description: Remote volume name + minLength: 1 type: string path: description: Remote volume path + minLength: 1 type: string provider: description: 'App Package Remote Store provider. Supported values: aws, minio, azure, gcp.' + enum: + - aws + - minio + - azure + - gcp type: string region: description: Region of the remote storage volume where apps - reside. Used for aws, if provided. Not used for minio - and azure. + reside. Required for aws, optional for azure and gcp. type: string secretRef: description: Secret object name @@ -9560,9 +10111,23 @@ spec: description: 'Remote Storage type. Supported values: s3, blob, gcs. s3 works with aws or minio providers, whereas blob works with azure provider, gcs works for gcp.' 
+ enum: + - s3 + - blob + - gcs type: string + required: + - endpoint + - name + - path type: object + x-kubernetes-validations: + - message: region is required when provider is aws + rule: self.provider != 'aws' || size(self.region) > 0 type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map type: object telAppInstalled: description: Telemetry App installation flag diff --git a/bundle/manifests/splunk-operator.clusterserviceversion.yaml b/bundle/manifests/splunk-operator.clusterserviceversion.yaml index 113078cbc..841068555 100644 --- a/bundle/manifests/splunk-operator.clusterserviceversion.yaml +++ b/bundle/manifests/splunk-operator.clusterserviceversion.yaml @@ -69,7 +69,7 @@ metadata: "name": "clustermanager-sample", "namespace": "splunk-operator" }, - "spec": {} + "spec": null }, { "apiVersion": "enterprise.splunk.com/v4", @@ -130,7 +130,7 @@ metadata: "name": "monitoringconsole-sample", "namespace": "splunk-operator" }, - "spec": {} + "spec": null }, { "apiVersion": "enterprise.splunk.com/v4", @@ -141,7 +141,91 @@ metadata: ], "name": "objectstorage-sample" }, - "spec": {} + "spec": { + "provider": "s3", + "s3": { + "path": "bucket/path" + } + } + }, + { + "apiVersion": "enterprise.splunk.com/v4", + "kind": "PostgresCluster", + "metadata": { + "labels": { + "app.kubernetes.io/managed-by": "kustomize", + "app.kubernetes.io/name": "splunk-operator" + }, + "name": "postgresql-cluster-dev" + }, + "spec": { + "class": "postgresql-dev" + } + }, + { + "apiVersion": "enterprise.splunk.com/v4", + "kind": "PostgresClusterClass", + "metadata": { + "name": "postgresql-dev" + }, + "spec": { + "cnpg": { + "connectionPooler": { + "config": { + "max_client_conn": "100" + }, + "instances": 2, + "mode": "transaction" + }, + "primaryUpdateMethod": "restart" + }, + "config": { + "connectionPoolerEnabled": true, + "instances": 1, + "postgresVersion": "18", + "resources": { + "limits": { + "cpu": "1", + "memory": "2Gi" + }, + "requests": { + "cpu": "500m", 
+ "memory": "1Gi" + } + }, + "storage": "10Gi" + }, + "provisioner": "postgresql.cnpg.io" + } + }, + { + "apiVersion": "enterprise.splunk.com/v4", + "kind": "PostgresDatabase", + "metadata": { + "name": "splunk-databases" + }, + "spec": { + "clusterRef": { + "name": "postgresql-cluster-dev" + }, + "databases": [ + { + "deletionPolicy": "Delete", + "extensions": [ + "pg_stat_statements", + "pgcrypto" + ], + "name": "kvstore" + }, + { + "deletionPolicy": "Delete", + "extensions": [ + "pg_trgm" + ], + "name": "analytics" + } + ] + } }, { "apiVersion": "enterprise.splunk.com/v4", @@ -152,7 +236,14 @@ metadata: ], "name": "queue-sample" }, - "spec": {} + "spec": { + "provider": "sqs", + "sqs": { + "authRegion": "us-west-2", + "dlq": "dlq", + "name": "queue" + } + } }, { "apiVersion": "enterprise.splunk.com/v4", @@ -180,13 +271,13 @@ metadata: "name": "standalone-sample", "namespace": "splunk-operator" }, - "spec": {} + "spec": null } ] capabilities: Seamless Upgrades categories: Big Data, Logging & Tracing, Monitoring, Security, AI/Machine Learning containerImage: splunk/splunk-operator@sha256:c4e0d314622699496f675760aad314520d050a66627fdf33e1e21fa28ca85d50 - createdAt: "2026-03-02T17:02:29Z" + createdAt: "2026-03-25T20:21:54Z" description: The Splunk Operator for Kubernetes enables you to quickly and easily deploy Splunk Enterprise on your choice of private or public cloud provider. The Operator simplifies scaling and management of Splunk Enterprise by automating @@ -269,6 +360,23 @@ spec: kind: ObjectStorage name: objectstorages.enterprise.splunk.com version: v4 + - description: |- + PostgresClusterClass is the Schema for the postgresclusterclasses API. + PostgresClusterClass defines a reusable template and policy for postgres cluster provisioning. + displayName: Postgres Cluster Class + kind: PostgresClusterClass + name: postgresclusterclasses.enterprise.splunk.com + version: v4 + - description: PostgresCluster is the Schema for the postgresclusters API. 
+ displayName: Postgres Cluster + kind: PostgresCluster + name: postgresclusters.enterprise.splunk.com + version: v4 + - description: PostgresDatabase is the Schema for the postgresdatabases API. + displayName: Postgres Database + kind: PostgresDatabase + name: postgresdatabases.enterprise.splunk.com + version: v4 - description: Queue is the Schema for the queues API displayName: Queue kind: Queue @@ -371,6 +479,8 @@ spec: - licensemasters - monitoringconsoles - objectstorages + - postgresclusters + - postgresdatabases - queues - searchheadclusters - standalones @@ -393,6 +503,8 @@ spec: - licensemasters/finalizers - monitoringconsoles/finalizers - objectstorages/finalizers + - postgresclusters/finalizers + - postgresdatabases/finalizers - queues/finalizers - searchheadclusters/finalizers - standalones/finalizers @@ -409,6 +521,8 @@ spec: - licensemasters/status - monitoringconsoles/status - objectstorages/status + - postgresclusters/status + - postgresdatabases/status - queues/status - searchheadclusters/status - standalones/status @@ -416,6 +530,35 @@ spec: - get - patch - update + - apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses + verbs: + - get + - list + - watch + - apiGroups: + - postgresql.cnpg.io + resources: + - clusters + - databases + - poolers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get - apiGroups: - authentication.k8s.io resources: @@ -465,7 +608,7 @@ spec: fieldRef: fieldPath: metadata.annotations['olm.targetNamespaces'] - name: RELATED_IMAGE_SPLUNK_ENTERPRISE - value: docker.io/splunk/splunk:10.2.0 + value: docker.io/splunk/splunk - name: OPERATOR_NAME value: splunk-operator - name: SPLUNK_GENERAL_TERMS @@ -474,7 +617,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name - image: docker.io/splunk/splunk-operator:3.1.0 + image: controller:latest imagePullPolicy: Always 
livenessProbe: httpGet: @@ -587,7 +730,7 @@ spec: name: Splunk Inc. url: www.splunk.com relatedImages: - - image: docker.io/splunk/splunk:10.2.0 + - image: docker.io/splunk/splunk name: splunk-enterprise replaces: splunk-operator.v3.0.0 version: 3.1.0 diff --git a/bundle/metadata/annotations.yaml b/bundle/metadata/annotations.yaml index 890fd61c7..87b0891ef 100644 --- a/bundle/metadata/annotations.yaml +++ b/bundle/metadata/annotations.yaml @@ -4,11 +4,10 @@ annotations: operators.operatorframework.io.bundle.manifests.v1: manifests/ operators.operatorframework.io.bundle.metadata.v1: metadata/ operators.operatorframework.io.bundle.package.v1: splunk-operator - operators.operatorframework.io.bundle.channels.v1: stable - operators.operatorframework.io.bundle.channel.default.v1: stable - operators.operatorframework.io.metrics.builder: operator-sdk-v1.31.0 + operators.operatorframework.io.bundle.channels.v1: alpha + operators.operatorframework.io.metrics.builder: operator-sdk-v1.42.0 operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 - operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v3 + operators.operatorframework.io.metrics.project_layout: go.kubebuilder.io/v4 # Annotations for testing. 
operators.operatorframework.io.test.mediatype.v1: scorecard+v1 diff --git a/cmd/main.go b/cmd/main.go index 3c9e223cc..9b960c2bc 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -53,6 +53,8 @@ import ( enterpriseApiV3 "github.com/splunk/splunk-operator/api/v3" enterpriseApi "github.com/splunk/splunk-operator/api/v4" "github.com/splunk/splunk-operator/internal/controller" + + cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" //+kubebuilder:scaffold:imports //extapi "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" ) @@ -66,6 +68,7 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(enterpriseApi.AddToScheme(scheme)) utilruntime.Must(enterpriseApiV3.AddToScheme(scheme)) + utilruntime.Must(cnpgv1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme //utilruntime.Must(extapi.AddToScheme(scheme)) } @@ -279,6 +282,20 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "Telemetry") os.Exit(1) } + if err := (&controller.PostgresDatabaseReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "PostgresDatabase") + os.Exit(1) + } + if err := (&controller.PostgresClusterReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "PostgresCluster") + os.Exit(1) + } // Setup centralized validation webhook server (opt-in via ENABLE_VALIDATION_WEBHOOK env var, defaults to false) enableWebhooks := os.Getenv("ENABLE_VALIDATION_WEBHOOK") diff --git a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml index a19a639ae..d6d40d56a 100644 --- a/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml +++ b/config/crd/bases/enterprise.splunk.com_clustermanagers.yaml @@ -639,8 +639,8 @@ spec: most preferred is the 
one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1302,7 +1302,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1360,6 +1362,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. 
+ type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1664,7 +1703,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -3321,15 +3360,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. 
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3511,12 +3548,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3595,7 +3630,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -4015,6 +4050,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. 
+ + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). 
+ + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4149,7 +4289,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. 
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml index 77b835376..adc19b9d7 100644 --- a/config/crd/bases/enterprise.splunk.com_clustermasters.yaml +++ b/config/crd/bases/enterprise.splunk.com_clustermasters.yaml @@ -635,8 +635,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1298,7 +1298,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1356,6 +1358,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. 
+ type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1660,7 +1699,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -3317,15 +3356,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. 
An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3507,12 +3544,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3591,7 +3626,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -4011,6 +4046,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. 
+ + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. 
+ type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. 
+ format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4145,7 +4285,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 8ae972d7c..bf9f312d1 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -642,8 +642,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1120,7 +1120,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. 
type: string value: description: |- @@ -1178,6 +1180,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1487,7 +1526,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -3000,15 +3039,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. 
If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3190,12 +3227,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. 
type: string path: description: |- @@ -3274,7 +3309,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3694,6 +3729,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. 
If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. 
+ + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3828,7 +3968,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -4856,8 +4995,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -5334,7 +5473,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -5392,6 +5533,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. 
+ Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -5786,7 +5964,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -7299,15 +7477,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. 
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -7489,12 +7665,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -7573,7 +7747,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -7993,6 +8167,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. 
+ + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). 
+ + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8127,7 +8406,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. 
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 44238ca7a..a7d91abaa 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -635,8 +635,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1298,7 +1298,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1356,6 +1358,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. 
+ type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1752,7 +1791,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -3265,15 +3304,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. 
An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3455,12 +3492,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3539,7 +3574,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3959,6 +3994,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. 
+ + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. 
+ type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. 
+ format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4093,7 +4233,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml b/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml index b65b6beaf..bedc18ed2 100644 --- a/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml +++ b/config/crd/bases/enterprise.splunk.com_licensemanagers.yaml @@ -629,8 +629,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1292,7 +1292,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. 
type: string value: description: |- @@ -1350,6 +1352,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1654,7 +1693,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -3167,15 +3206,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. 
If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3357,12 +3394,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. 
type: string path: description: |- @@ -3441,7 +3476,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3861,6 +3896,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. 
If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. 
+ + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3995,7 +4135,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_licensemasters.yaml b/config/crd/bases/enterprise.splunk.com_licensemasters.yaml index c2a96b9e7..03563e31d 100644 --- a/config/crd/bases/enterprise.splunk.com_licensemasters.yaml +++ b/config/crd/bases/enterprise.splunk.com_licensemasters.yaml @@ -624,8 +624,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1287,7 +1287,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1345,6 +1347,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. 
+ Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1649,7 +1688,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -3162,15 +3201,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. 
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3352,12 +3389,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3436,7 +3471,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3856,6 +3891,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. 
+ + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). 
+ + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3990,7 +4130,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. 
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml b/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml index f5142a14f..2a77ad99e 100644 --- a/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml +++ b/config/crd/bases/enterprise.splunk.com_monitoringconsoles.yaml @@ -631,8 +631,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1294,7 +1294,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1352,6 +1354,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. 
+ type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1656,7 +1695,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -3169,15 +3208,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. 
An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3359,12 +3396,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3443,7 +3478,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3863,6 +3898,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. 
+ + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. 
+ type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. 
+ format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -3997,7 +4137,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -5251,8 +5390,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -5914,7 +6053,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -5972,6 +6113,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. 
+ During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -6276,7 +6454,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -7789,15 +7967,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. 
- If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -7979,12 +8155,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -8063,7 +8237,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
- More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -8483,6 +8657,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. 
+ + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. 
+ + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8617,7 +8896,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. 
- More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_postgresclusterclasses.yaml b/config/crd/bases/enterprise.splunk.com_postgresclusterclasses.yaml new file mode 100644 index 000000000..70ef3536b --- /dev/null +++ b/config/crd/bases/enterprise.splunk.com_postgresclusterclasses.yaml @@ -0,0 +1,326 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: postgresclusterclasses.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: PostgresClusterClass + listKind: PostgresClusterClassList + plural: postgresclusterclasses + singular: postgresclusterclass + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.provisioner + name: Provisioner + type: string + - jsonPath: .spec.postgresClusterConfig.instances + name: Instances + type: integer + - jsonPath: .spec.postgresClusterConfig.storage + name: Storage + type: string + - jsonPath: .spec.postgresClusterConfig.postgresVersion + name: Version + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v4 + schema: + openAPIV3Schema: + description: |- + PostgresClusterClass is the Schema for the postgresclusterclasses API. + PostgresClusterClass defines a reusable template and policy for postgres cluster provisioning. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. 
+ Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + PostgresClusterClassSpec defines the desired state of PostgresClusterClass. + PostgresClusterClass is immutable after creation - it serves as a template for Cluster CRs. + properties: + cnpg: + description: |- + CNPG contains CloudNativePG-specific configuration and policies. + Only used when Provisioner is "postgresql.cnpg.io" + These settings CANNOT be overridden in PostgresCluster CR (platform policy). + properties: + connectionPooler: + description: |- + ConnectionPooler contains PgBouncer connection pooler configuration. + When enabled, creates RW and RO pooler deployments for clusters using this class. + properties: + config: + additionalProperties: + type: string + description: |- + Config contains PgBouncer configuration parameters. + Passed directly to CNPG Pooler spec.pgbouncer.parameters. + See: https://cloudnative-pg.io/docs/1.28/connection_pooling/#pgbouncer-configuration-options + type: object + instances: + default: 3 + description: |- + Instances is the number of PgBouncer pod replicas. + Higher values provide better availability and load distribution. + format: int32 + maximum: 10 + minimum: 1 + type: integer + mode: + default: transaction + description: Mode defines the connection pooling strategy. + enum: + - session + - transaction + - statement + type: string + type: object + primaryUpdateMethod: + default: switchover + description: |- + PrimaryUpdateMethod determines how the primary instance is updated. + "restart" - tolerate brief downtime (suitable for development) + "switchover" - minimal downtime via automated failover (production-grade) + + NOTE: When using "switchover", ensure clusterConfig.instances > 1. 
+ Switchover requires at least one replica to fail over to. + enum: + - restart + - switchover + type: string + type: object + config: + default: {} + description: |- + PostgresClusterConfig contains cluster-level configuration. + These settings apply to PostgresCluster infrastructure. + Can be overridden in PostgresCluster CR. + properties: + connectionPoolerEnabled: + default: false + description: |- + ConnectionPoolerEnabled controls whether PgBouncer connection pooling is deployed. + When true, creates RW and RO pooler deployments for clusters using this class. + Can be overridden in PostgresCluster CR. + type: boolean + instances: + default: 1 + description: |- + Instances is the number of database instances (1 primary + N replicas). + Single instance (1) is suitable for development. + High availability requires at least 3 instances (1 primary + 2 replicas). + format: int32 + maximum: 10 + minimum: 1 + type: integer + pgHBA: + description: |- + PgHBA contains pg_hba.conf host-based authentication rules. + Defines client authentication and connection security (cluster-wide). + Example: ["hostssl all all 0.0.0.0/0 scram-sha-256"] + items: + type: string + type: array + postgresVersion: + default: "18" + description: |- + PostgresVersion is the PostgreSQL version (major or major.minor). + Examples: "18" (latest 18.x), "18.1" (specific minor), "17", "16" + pattern: ^[0-9]+(\.[0-9]+)?$ + type: string + postgresqlConfig: + additionalProperties: + type: string + description: |- + PostgreSQLConfig contains PostgreSQL engine configuration parameters. + Maps to postgresql.conf settings (cluster-wide). + Example: {"max_connections": "200", "shared_buffers": "2GB"} + type: object + resources: + description: |- + Resources defines CPU and memory requests/limits per instance. + All instances in the cluster have the same resources. 
+ properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + storage: + anyOf: + - type: integer + - type: string + default: 50Gi + description: |- + Storage is the size of persistent volume for each instance. + Cannot be decreased after cluster creation (PostgreSQL limitation). + Recommended minimum: 10Gi for production viability. + Example: "50Gi", "100Gi", "1Ti" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + provisioner: + description: |- + Provisioner identifies which database provisioner to use. + Currently supported: "postgresql.cnpg.io" (CloudNativePG) + enum: + - postgresql.cnpg.io + type: string + required: + - provisioner + type: object + x-kubernetes-validations: + - message: cnpg config can only be set when provisioner is postgresql.cnpg.io + rule: '!has(self.cnpg) || self.provisioner == ''postgresql.cnpg.io''' + - message: cnpg.connectionPooler must be set when config.connectionPoolerEnabled + is true + rule: '!has(self.config) || !has(self.config.connectionPoolerEnabled) + || !self.config.connectionPoolerEnabled || (has(self.cnpg) && has(self.cnpg.connectionPooler))' + status: + description: PostgresClusterClassStatus defines the observed state of + PostgresClusterClass. + properties: + conditions: + description: Conditions represent the latest available observations + of the PostgresClusterClass state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + phase: + description: |- + Phase represents the current phase of the PostgresClusterClass. 
+ Valid phases: "Ready", "Invalid" + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/enterprise.splunk.com_postgresclusters.yaml b/config/crd/bases/enterprise.splunk.com_postgresclusters.yaml new file mode 100644 index 000000000..14ba142d6 --- /dev/null +++ b/config/crd/bases/enterprise.splunk.com_postgresclusters.yaml @@ -0,0 +1,469 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: postgresclusters.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: PostgresCluster + listKind: PostgresClusterList + plural: postgresclusters + singular: postgrescluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.class + name: Class + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v4 + schema: + openAPIV3Schema: + description: PostgresCluster is the Schema for the postgresclusters API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: |- + PostgresClusterSpec defines the desired state of PostgresCluster. 
+ Validation rules ensure immutability of Class, and that Storage and PostgresVersion can only be set once and cannot be removed or downgraded. + properties: + class: + description: This field is IMMUTABLE after creation. + minLength: 1 + type: string + x-kubernetes-validations: + - message: class is immutable + rule: self == oldSelf + clusterDeletionPolicy: + default: Retain + description: ClusterDeletionPolicy controls the deletion behavior + of the underlying CNPG Cluster when the PostgresCluster is deleted. + enum: + - Delete + - Retain + type: string + connectionPoolerConfig: + description: Only takes effect when connection pooling is enabled. + properties: + config: + additionalProperties: + type: string + description: |- + Config contains PgBouncer configuration parameters. + Passed directly to CNPG Pooler spec.pgbouncer.parameters. + See: https://cloudnative-pg.io/docs/1.28/connection_pooling/#pgbouncer-configuration-options + type: object + instances: + default: 3 + description: |- + Instances is the number of PgBouncer pod replicas. + Higher values provide better availability and load distribution. + format: int32 + maximum: 10 + minimum: 1 + type: integer + mode: + default: transaction + description: Mode defines the connection pooling strategy. + enum: + - session + - transaction + - statement + type: string + type: object + connectionPoolerEnabled: + default: false + description: |- + ConnectionPoolerEnabled controls whether PgBouncer connection pooling is deployed for this cluster. + When set, takes precedence over the class-level connectionPoolerEnabled value. + type: boolean + instances: + description: Instances overrides the number of PostgreSQL instances + from ClusterClass. + format: int32 + maximum: 10 + minimum: 1 + type: integer + managedRoles: + description: |- + ManagedRoles contains PostgreSQL roles that should be created in the cluster. 
+ This field supports Server-Side Apply with per-role granularity, allowing + multiple PostgresDatabase controllers to manage different roles independently. + items: + description: ManagedRole represents a PostgreSQL role to be created + and managed in the cluster. + properties: + exists: + default: true + description: Exists controls whether the role should be present + (true) or absent (false) in PostgreSQL. + type: boolean + name: + description: Name of the role/user to create. + maxLength: 63 + minLength: 1 + type: string + passwordSecretRef: + description: PasswordSecretRef references a Secret and the key + within it containing the password for this role. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + pgHBA: + default: [] + description: |- + PgHBA contains pg_hba.conf host-based authentication rules. + Defines client authentication and connection security (cluster-wide). + Maps to pg_hba.conf settings. + Default empty array prevents panic. + Example: ["hostssl all all 0.0.0.0/0 scram-sha-256"] + items: + type: string + type: array + postgresVersion: + description: |- + PostgresVersion is the PostgreSQL version (major or major.minor). 
+ Examples: "18" (latest 18.x), "18.1" (specific minor), "17", "16" + pattern: ^[0-9]+(\.[0-9]+)?$ + type: string + postgresqlConfig: + additionalProperties: + type: string + default: {} + description: |- + PostgreSQL overrides PostgreSQL engine parameters from ClusterClass. + Maps to postgresql.conf settings. + Default empty map prevents panic. + Example: {"shared_buffers": "128MB", "log_min_duration_statement": "500ms"} + type: object + resources: + description: Resources overrides CPU/memory resources from ClusterClass. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This field depends on the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + storage: + anyOf: + - type: integer + - type: string + description: |- + Storage overrides the storage size from ClusterClass. + Example: "5Gi" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + required: + - class + type: object + x-kubernetes-validations: + - messageExpression: '!has(self.postgresVersion) ? ''postgresVersion cannot + be removed once set (was: '' + oldSelf.postgresVersion + '')'' : ''postgresVersion + major version cannot be downgraded (from: '' + oldSelf.postgresVersion + + '', to: '' + self.postgresVersion + '')''' + rule: '!has(oldSelf.postgresVersion) || (has(self.postgresVersion) && + int(self.postgresVersion.split(''.'')[0]) >= int(oldSelf.postgresVersion.split(''.'')[0]))' + - messageExpression: '!has(self.storage) ? 
''storage cannot be removed + once set (was: '' + string(oldSelf.storage) + '')'' : ''storage size + cannot be decreased (from: '' + string(oldSelf.storage) + '', to: + '' + string(self.storage) + '')''' + rule: '!has(oldSelf.storage) || (has(self.storage) && quantity(self.storage).compareTo(quantity(oldSelf.storage)) + >= 0)' + - message: connectionPoolerConfig cannot be overridden on PostgresCluster + rule: '!has(self.connectionPoolerConfig)' + status: + description: PostgresClusterStatus defines the observed state of PostgresCluster. + properties: + conditions: + description: Conditions represent the latest available observations + of the PostgresCluster's state. + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. 
+ This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + connectionPoolerStatus: + description: |- + ConnectionPoolerStatus contains the observed state of the connection pooler. + Only populated when connection pooler is enabled in the PostgresClusterClass. + properties: + enabled: + description: Enabled indicates whether pooler is active for this + cluster. + type: boolean + type: object + managedRolesStatus: + description: ManagedRolesStatus tracks the reconciliation status of + managed roles. + properties: + failed: + additionalProperties: + type: string + description: Failed contains roles that failed to reconcile with + error messages. + type: object + pending: + description: Pending contains roles that are being created but + not yet ready. + items: + type: string + type: array + reconciled: + description: Reconciled contains roles that have been successfully + created and are ready. + items: + type: string + type: array + type: object + phase: + description: |- + Phase represents the current phase of the PostgresCluster. + Values: "Pending", "Provisioning", "Failed", "Ready", "Deleting" + type: string + provisionerRef: + description: |- + ProvisionerRef contains reference to the provisioner resource managing this PostgresCluster. + Right now, only CNPG is supported. + properties: + apiVersion: + description: API version of the referent. 
+ type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic + resources: + description: Resources contains references to related Kubernetes resources + like ConfigMaps and Secrets. + properties: + configMapRef: + description: |- + ConfigMapRef references the ConfigMap with connection endpoints. + Contains: CLUSTER_ENDPOINTS, POOLER_ENDPOINTS (if connection pooler enabled) + properties: + name: + default: "" + description: |- + Name of the referent. 
+ This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + secretRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/enterprise.splunk.com_postgresdatabases.yaml b/config/crd/bases/enterprise.splunk.com_postgresdatabases.yaml new file mode 100644 index 000000000..d8df534d3 --- /dev/null +++ b/config/crd/bases/enterprise.splunk.com_postgresdatabases.yaml @@ -0,0 +1,259 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: postgresdatabases.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: PostgresDatabase + listKind: PostgresDatabaseList + plural: postgresdatabases + singular: postgresdatabase + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .spec.clusterRef.name + name: Cluster + type: string + - jsonPath: .status.phase + name: Phase + type: string + - jsonPath: 
.metadata.creationTimestamp + name: Age + type: date + name: v4 + schema: + openAPIV3Schema: + description: PostgresDatabase is the Schema for the postgresdatabases API. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PostgresDatabaseSpec defines the desired state of PostgresDatabase. + properties: + clusterRef: + description: Reference to Postgres Cluster managed by postgresCluster + controller + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + databases: + items: + properties: + deletionPolicy: + default: Delete + enum: + - Delete + - Retain + type: string + extensions: + items: + type: string + type: array + name: + maxLength: 30 + type: string + required: + - name + type: object + maxItems: 10 + minItems: 1 + type: array + x-kubernetes-validations: + - message: database names must be unique + rule: self.all(x, self.filter(y, y.name == x.name).size() == 1) + required: + - clusterRef + - databases + type: object + x-kubernetes-validations: + - message: clusterRef is immutable + rule: self.clusterRef == oldSelf.clusterRef + status: + description: PostgresDatabaseStatus defines the observed state of PostgresDatabase. + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + databases: + items: + properties: + adminUserSecretRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + configMap: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + databaseRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + name: + type: string + ready: + type: boolean + rwUserSecretRef: + description: SecretKeySelector selects a key of a Secret. + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + type: array + observedGeneration: + description: ObservedGeneration represents the .metadata.generation + that the status was set based upon. 
+ format: int64 + type: integer + phase: + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml index 4f4674b32..2128ce7d6 100644 --- a/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_searchheadclusters.yaml @@ -637,8 +637,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1300,7 +1300,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1358,6 +1360,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. 
+ type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1667,7 +1706,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -3180,15 +3219,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. 
An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3370,12 +3407,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3454,7 +3489,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -3874,6 +3909,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. 
+ + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. 
+ type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. 
+ format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4008,7 +4148,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -5344,8 +5483,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -6181,7 +6320,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -6264,7 +6403,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. 
type: string value: description: |- @@ -6322,6 +6463,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -6632,7 +6810,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -8145,15 +8323,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. 
If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -8335,12 +8511,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. 
type: string path: description: |- @@ -8419,7 +8593,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -8839,6 +9013,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. 
If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. 
+ + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -8973,7 +9252,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- diff --git a/config/crd/bases/enterprise.splunk.com_standalones.yaml b/config/crd/bases/enterprise.splunk.com_standalones.yaml index 8c8c5035c..5f36c74d1 100644 --- a/config/crd/bases/enterprise.splunk.com_standalones.yaml +++ b/config/crd/bases/enterprise.splunk.com_standalones.yaml @@ -632,8 +632,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -1295,7 +1295,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -1353,6 +1355,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. + If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. 
+ Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -1661,7 +1700,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -3318,15 +3357,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. 
More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -3508,12 +3545,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -3592,7 +3627,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -4012,6 +4047,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. 
+ + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. + + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). 
+ + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. + type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -4146,7 +4286,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. 
Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- @@ -5550,8 +5689,8 @@ spec: most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + compute a sum by iterating through the elements of this field and subtracting + "weight" from the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. items: description: The weights of all of the matched WeightedPodAffinityTerm @@ -6213,7 +6352,9 @@ spec: a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. + description: |- + Name of the environment variable. + May consist of any printable ASCII characters except '='. type: string value: description: |- @@ -6271,6 +6412,43 @@ spec: - fieldPath type: object x-kubernetes-map-type: atomic + fileKeyRef: + description: |- + FileKeyRef selects a key of the env file. + Requires the EnvFiles feature gate to be enabled. + properties: + key: + description: |- + The key within the env file. An invalid key will prevent the pod from starting. + The keys defined within a source may consist of any printable ASCII characters except '='. + During Alpha stage of the EnvFiles feature gate, the key size is limited to 128 characters. + type: string + optional: + default: false + description: |- + Specify whether the file or its key must be defined. If the file or key + does not exist, then the env var is not published. 
+ If optional is set to true and the specified key does not exist, + the environment variable will not be set in the Pod's containers. + + If optional is set to false and the specified key does not exist, + an error will be returned during Pod creation. + type: boolean + path: + description: |- + The path within the volume from which to select the file. + Must be relative and may not contain the '..' path or start with '..'. + type: string + volumeName: + description: The name of the volume mount containing + the env file. + type: string + required: + - key + - path + - volumeName + type: object + x-kubernetes-map-type: atomic resourceFieldRef: description: |- Selects a resource of the container: only resources limits and requests @@ -6580,7 +6758,7 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the + This field depends on the DynamicResourceAllocation feature gate. This field is immutable. It can only be set for containers. @@ -8237,15 +8415,13 @@ spec: volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, - it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass - will be applied to the claim but it's not allowed to reset this field to empty string once it is set. - If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass - will be set by the persistentvolume controller if it exists. + it can be changed after the claim is created. An empty string or nil value indicates that no + VolumeAttributesClass will be applied to the claim. 
If the claim enters an Infeasible error state, + this field can be reset to its previous value (including nil) to cancel the modification. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ - (Beta) Using this field requires the VolumeAttributesClass feature gate to be enabled (off by default). type: string volumeMode: description: |- @@ -8427,12 +8603,10 @@ spec: description: |- glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. Deprecated: Glusterfs is deprecated and the in-tree glusterfs type is no longer supported. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + description: endpoints is the endpoint name that details + Glusterfs topology. type: string path: description: |- @@ -8511,7 +8685,7 @@ spec: description: |- iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md + More info: https://kubernetes.io/docs/concepts/storage/volumes/#iscsi properties: chapAuthDiscovery: description: chapAuthDiscovery defines whether support iSCSI @@ -8931,6 +9105,111 @@ spec: type: array x-kubernetes-list-type: atomic type: object + podCertificate: + description: |- + Projects an auto-rotating credential bundle (private key and certificate + chain) that the pod can use either as a TLS client or server. + + Kubelet generates a private key and uses it to send a + PodCertificateRequest to the named signer. 
Once the signer approves the + request and issues a certificate chain, Kubelet writes the key and + certificate chain to the pod filesystem. The pod does not start until + certificates have been issued for each podCertificate projected volume + source in its spec. + + Kubelet will begin trying to rotate the certificate at the time indicated + by the signer using the PodCertificateRequest.Status.BeginRefreshAt + timestamp. + + Kubelet can write a single file, indicated by the credentialBundlePath + field, or separate files, indicated by the keyPath and + certificateChainPath fields. + + The credential bundle is a single file in PEM format. The first PEM + entry is the private key (in PKCS#8 format), and the remaining PEM + entries are the certificate chain issued by the signer (typically, + signers will return their certificate chain in leaf-to-root order). + + Prefer using the credential bundle format, since your application code + can read it atomically. If you use keyPath and certificateChainPath, + your application must make two separate file reads. If these coincide + with a certificate rotation, it is possible that the private key and leaf + certificate you read may not correspond to each other. Your application + will need to check for this condition, and re-read until they are + consistent. + + The named signer controls chooses the format of the certificate it + issues; consult the signer implementation's documentation to learn how to + use the certificates it issues. + properties: + certificateChainPath: + description: |- + Write the certificate chain at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + credentialBundlePath: + description: |- + Write the credential bundle at this path in the projected volume. 
+ + The credential bundle is a single file that contains multiple PEM blocks. + The first PEM block is a PRIVATE KEY block, containing a PKCS#8 private + key. + + The remaining blocks are CERTIFICATE blocks, containing the issued + certificate chain from the signer (leaf and any intermediates). + + Using credentialBundlePath lets your Pod's application code make a single + atomic read that retrieves a consistent key and certificate chain. If you + project them to separate files, your application code will need to + additionally check that the leaf certificate was issued to the key. + type: string + keyPath: + description: |- + Write the key at this path in the projected volume. + + Most applications should use credentialBundlePath. When using keyPath + and certificateChainPath, your application needs to check that the key + and leaf certificate are consistent, because it is possible to read the + files mid-rotation. + type: string + keyType: + description: |- + The type of keypair Kubelet will generate for the pod. + + Valid values are "RSA3072", "RSA4096", "ECDSAP256", "ECDSAP384", + "ECDSAP521", and "ED25519". + type: string + maxExpirationSeconds: + description: |- + maxExpirationSeconds is the maximum lifetime permitted for the + certificate. + + Kubelet copies this value verbatim into the PodCertificateRequests it + generates for this projection. + + If omitted, kube-apiserver will set it to 86400(24 hours). kube-apiserver + will reject values shorter than 3600 (1 hour). The maximum allowable + value is 7862400 (91 days). + + The signer implementation is then free to issue a certificate with any + lifetime *shorter* than MaxExpirationSeconds, but no shorter than 3600 + seconds (1 hour). This constraint is enforced by kube-apiserver. + `kubernetes.io` signers will never issue certificates with a lifetime + longer than 24 hours. + format: int32 + type: integer + signerName: + description: Kubelet's generated CSRs will be + addressed to this signer. 
+ type: string + required: + - keyType + - signerName + type: object secret: description: secret information about the secret data to project @@ -9065,7 +9344,6 @@ spec: description: |- rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. Deprecated: RBD is deprecated and the in-tree rbd type is no longer supported. - More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: description: |- diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 21dd480ce..648316baf 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -13,6 +13,9 @@ resources: - bases/enterprise.splunk.com_ingestorclusters.yaml - bases/enterprise.splunk.com_queues.yaml - bases/enterprise.splunk.com_objectstorages.yaml +- bases/enterprise.splunk.com_postgresdatabases.yaml +- bases/enterprise.splunk.com_postgresclusterclasses.yaml +- bases/enterprise.splunk.com_postgresclusters.yaml #+kubebuilder:scaffold:crdkustomizeresource diff --git a/config/manifests/bases/splunk-operator.clusterserviceversion.yaml b/config/manifests/bases/splunk-operator.clusterserviceversion.yaml index 3fa109139..f96e1158b 100644 --- a/config/manifests/bases/splunk-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/splunk-operator.clusterserviceversion.yaml @@ -68,6 +68,23 @@ spec: kind: ObjectStorage name: objectstorages.enterprise.splunk.com version: v4 + - description: |- + PostgresClusterClass is the Schema for the postgresclusterclasses API. + PostgresClusterClass defines a reusable template and policy for postgres cluster provisioning. + displayName: Postgres Cluster Class + kind: PostgresClusterClass + name: postgresclusterclasses.enterprise.splunk.com + version: v4 + - description: PostgresCluster is the Schema for the postgresclusters API. 
+ displayName: Postgres Cluster + kind: PostgresCluster + name: postgresclusters.enterprise.splunk.com + version: v4 + - description: PostgresDatabase is the Schema for the postgresdatabases API. + displayName: Postgres Database + kind: PostgresDatabase + name: postgresdatabases.enterprise.splunk.com + version: v4 - description: Queue is the Schema for the queues API displayName: Queue kind: Queue diff --git a/config/rbac/postgrescluster_editor_role.yaml b/config/rbac/postgrescluster_editor_role.yaml new file mode 100644 index 000000000..13884ce4b --- /dev/null +++ b/config/rbac/postgrescluster_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgrescluster-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusters/status + verbs: + - get diff --git a/config/rbac/postgrescluster_viewer_role.yaml b/config/rbac/postgrescluster_viewer_role.yaml new file mode 100644 index 000000000..0474151b3 --- /dev/null +++ b/config/rbac/postgrescluster_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. 
+# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgrescluster-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusters + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusters/status + verbs: + - get diff --git a/config/rbac/postgresclusterclass_editor_role.yaml b/config/rbac/postgresclusterclass_editor_role.yaml new file mode 100644 index 000000000..a634510ff --- /dev/null +++ b/config/rbac/postgresclusterclass_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgresclusterclass-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses/status + verbs: + - get diff --git a/config/rbac/postgresclusterclass_viewer_role.yaml b/config/rbac/postgresclusterclass_viewer_role.yaml new file mode 100644 index 000000000..4da318ff2 --- /dev/null +++ b/config/rbac/postgresclusterclass_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgresclusterclass-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses/status + verbs: + - get diff --git a/config/rbac/postgresdatabase_editor_role.yaml b/config/rbac/postgresdatabase_editor_role.yaml new file mode 100644 index 000000000..21891af10 --- /dev/null +++ b/config/rbac/postgresdatabase_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. 
+# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgresdatabase-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresdatabases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - postgresdatabases/status + verbs: + - get diff --git a/config/rbac/postgresdatabase_viewer_role.yaml b/config/rbac/postgresdatabase_viewer_role.yaml new file mode 100644 index 000000000..702fab391 --- /dev/null +++ b/config/rbac/postgresdatabase_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: splunk-operator + app.kubernetes.io/managed-by: kustomize + name: postgresdatabase-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - postgresdatabases + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - postgresdatabases/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 7873f18e1..d676ac24a 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -55,6 +55,8 @@ rules: - licensemasters - monitoringconsoles - objectstorages + - postgresclusters + - postgresdatabases - queues - searchheadclusters - standalones @@ -77,6 +79,8 @@ rules: - licensemasters/finalizers - monitoringconsoles/finalizers - objectstorages/finalizers + - postgresclusters/finalizers + - postgresdatabases/finalizers - queues/finalizers - searchheadclusters/finalizers - standalones/finalizers @@ -93,6 +97,8 @@ rules: - licensemasters/status - monitoringconsoles/status - objectstorages/status + - postgresclusters/status + - postgresdatabases/status - queues/status - searchheadclusters/status - standalones/status @@ -100,3 +106,32 @@ rules: - get - patch - update +- apiGroups: + - enterprise.splunk.com + resources: + - postgresclusterclasses + verbs: + - get + - list + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters + - databases + - poolers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - postgresql.cnpg.io + resources: + - clusters/status + - poolers/status + verbs: + - get diff --git a/config/samples/enterprise_v4_postgrescluster_default.yaml b/config/samples/enterprise_v4_postgrescluster_default.yaml new file mode 100644 index 000000000..6669aceb2 --- /dev/null +++ b/config/samples/enterprise_v4_postgrescluster_default.yaml @@ -0,0 +1,12 @@ +# This is a sample PostgresCluster manifest with default values 
for all fields.
+# Defaults are inherited from the ClusterClass "postgresql-dev" (see enterprise_v4_postgresclusterclass_dev.yaml) and can be overridden here.
+apiVersion: enterprise.splunk.com/v4
+kind: PostgresCluster
+metadata:
+  labels:
+    app.kubernetes.io/name: splunk-operator
+    app.kubernetes.io/managed-by: kustomize
+  name: postgresql-cluster-dev
+spec:
+  class: postgresql-dev
+
diff --git a/config/samples/enterprise_v4_postgrescluster_dev.yaml b/config/samples/enterprise_v4_postgrescluster_dev.yaml
new file mode 100644
index 000000000..b5c6b8700
--- /dev/null
+++ b/config/samples/enterprise_v4_postgrescluster_dev.yaml
@@ -0,0 +1,28 @@
+# Sample PostgresCluster using Postgres-dev ClusterClass with overriding defaults
+# This sample demonstrates how to override default values from the ClusterClass "postgresql-dev" (see enterprise_v4_postgresclusterclass_dev.yaml) in a PostgresCluster manifest.
+# Overrides include changing storage, changing PostgreSQL version, and modifying resources.
+apiVersion: enterprise.splunk.com/v4
+kind: PostgresCluster
+metadata:
+  labels:
+    app.kubernetes.io/name: splunk-operator
+    app.kubernetes.io/managed-by: kustomize
+  name: postgresql-cluster-dev
+spec:
+  # Reference the ClusterClass to inherit defaults - this is required, immutable, and must match the name of an existing ClusterClass
+  class: postgresql-dev
+  clusterDeletionPolicy: Retain
+  instances: 3
+  # Storage and PostgreSQL version are overridden from the ClusterClass defaults. Validation rules on the PostgresCluster resource will prevent removing these fields or setting them to lower values than the original overrides.
+ storage: 1Gi + postgresVersion: "15.10" + resources: + requests: + cpu: "250m" + memory: "512Mi" + limits: + cpu: "500m" + memory: "1Gi" + # Enable connection pooler for this cluster + # Takes precedence over the class-level connectionPoolerEnabled value + connectionPoolerEnabled: true \ No newline at end of file diff --git a/config/samples/enterprise_v4_postgresclusterclass_dev.yaml b/config/samples/enterprise_v4_postgresclusterclass_dev.yaml new file mode 100644 index 000000000..a9846e36c --- /dev/null +++ b/config/samples/enterprise_v4_postgresclusterclass_dev.yaml @@ -0,0 +1,39 @@ +--- +# Development PostgresClusterClass +# Minimal configuration for local development and testing +apiVersion: enterprise.splunk.com/v4 +kind: PostgresClusterClass +metadata: + name: postgresql-dev +spec: + provisioner: postgresql.cnpg.io + + config: + # Single instance - no HA (suitable for development) + instances: 1 + + # Small storage for development + storage: 10Gi + + # Latest PostgreSQL 18 + postgresVersion: "18" + + # Minimal resources + resources: + requests: + cpu: "500m" + memory: "1Gi" + limits: + cpu: "1" + memory: "2Gi" + connectionPoolerEnabled: true + + cnpg: + # Restart method - tolerate downtime in dev + primaryUpdateMethod: restart + connectionPooler: + instances: 2 + mode: transaction + config: + max_client_conn: "100" + diff --git a/config/samples/enterprise_v4_postgresclusterclass_prod.yaml b/config/samples/enterprise_v4_postgresclusterclass_prod.yaml new file mode 100644 index 000000000..56d9f232a --- /dev/null +++ b/config/samples/enterprise_v4_postgresclusterclass_prod.yaml @@ -0,0 +1,80 @@ +--- +# Production PostgresClusterClass +# Full configuration with HA, security, and tuned PostgreSQL settings +apiVersion: enterprise.splunk.com/v4 +kind: PostgresClusterClass +metadata: + name: postgresql-prod +spec: + provisioner: postgresql.cnpg.io + + config: + # High availability - 1 primary + 2 replicas + instances: 3 + + # Production storage + storage: 100Gi + + 
# PostgreSQL 18.1 (specific minor version) + postgresVersion: "18.1" + + # Production-grade resources + resources: + requests: + cpu: "2" + memory: "8Gi" + limits: + cpu: "4" + memory: "16Gi" + + # Tuned PostgreSQL configuration for OLTP workload + postgresqlConfig: + # Connection settings + max_connections: "200" + + # Memory settings (based on 8GB RAM) + shared_buffers: "2GB" + effective_cache_size: "6GB" + maintenance_work_mem: "512MB" + work_mem: "20MB" + + # WAL settings + wal_buffers: "16MB" + min_wal_size: "1GB" + max_wal_size: "4GB" + + # Query tuning + random_page_cost: "1.1" # SSD optimization + effective_io_concurrency: "200" + + # Logging + log_destination: "stderr" + logging_collector: "on" + log_min_duration_statement: "1000" # Log queries > 1s + + # Secure pg_hba configuration + pgHBA: + # Reject all non-SSL connections + - "hostnossl all all 0.0.0.0/0 reject" + # Require SSL + password authentication + - "hostssl all all 0.0.0.0/0 scram-sha-256" + + # Enable connection pooler for clusters using this class + connectionPoolerEnabled: true + + cnpg: + # Switchover method - minimal downtime via automated failover + primaryUpdateMethod: switchover + + # Connection pooler configuration (PgBouncer) + connectionPooler: + # Number of PgBouncer pod replicas + instances: 3 + # Pooling mode + mode: transaction + # PgBouncer configuration parameters + config: + # Maximum number of client connections allowed + max_client_conn: "100" + # Default number of server connections per user/database pair + default_pool_size: "20" diff --git a/config/samples/enterprise_v4_postgresdatabase.yaml b/config/samples/enterprise_v4_postgresdatabase.yaml new file mode 100644 index 000000000..874393548 --- /dev/null +++ b/config/samples/enterprise_v4_postgresdatabase.yaml @@ -0,0 +1,18 @@ +apiVersion: enterprise.splunk.com/v4 +kind: PostgresDatabase +metadata: + name: splunk-databases + # namespace: default +spec: + clusterRef: + name: postgresql-cluster-dev + databases: + - name: 
kvstore + extensions: + - pg_stat_statements + - pgcrypto + deletionPolicy: Delete + - name: analytics + extensions: + - pg_trgm + deletionPolicy: Delete \ No newline at end of file diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 34c05ab05..b2d13b188 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -16,4 +16,7 @@ resources: - enterprise_v4_ingestorcluster.yaml - enterprise_v4_queue.yaml - enterprise_v4_objectstorage.yaml +- enterprise_v4_postgresdatabase.yaml +- enterprise_v4_postgresclusterclass_dev.yaml +- enterprise_v4_postgrescluster_default.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/validation-tests/database/01-invalid-duplicate-names.yaml b/config/samples/validation-tests/database/01-invalid-duplicate-names.yaml new file mode 100644 index 000000000..95cd4d19b --- /dev/null +++ b/config/samples/validation-tests/database/01-invalid-duplicate-names.yaml @@ -0,0 +1,14 @@ +apiVersion: enterprise.splunk.com/v4 +kind: PostgresDatabase +metadata: + name: test-duplicate-names + namespace: default +spec: + clusterRef: + name: postgres-cluster + databases: + - name: kvstore + extensions: + - pg_stat_statements + - name: analytics + - name: kvstore # DUPLICATE! Should fail with: "database names must be unique" diff --git a/config/samples/validation-tests/database/02-invalid-immutability-update.yaml b/config/samples/validation-tests/database/02-invalid-immutability-update.yaml new file mode 100644 index 000000000..73dfb300b --- /dev/null +++ b/config/samples/validation-tests/database/02-invalid-immutability-update.yaml @@ -0,0 +1,19 @@ +apiVersion: enterprise.splunk.com/v4 +kind: PostgresDatabase +metadata: + name: test-postgresdatabase + namespace: default +spec: + clusterRef: + name: different-cluster # CHANGED! 
Should fail with: "clusterRef is immutable" + databases: + - name: kvstore + extensions: + - pg_stat_statements + - pgcrypto + deletionPolicy: Retain + - name: analytics + extensions: + - pg_trgm + deletionPolicy: Delete + - name: metrics diff --git a/config/samples/validation-tests/database/03-invalid-deletion-policy.yaml b/config/samples/validation-tests/database/03-invalid-deletion-policy.yaml new file mode 100644 index 000000000..bb911e88c --- /dev/null +++ b/config/samples/validation-tests/database/03-invalid-deletion-policy.yaml @@ -0,0 +1,11 @@ +apiVersion: enterprise.splunk.com/v4 +kind: PostgresDatabase +metadata: + name: test-invalid-policy + namespace: default +spec: + clusterRef: + name: postgres-cluster + databases: + - name: kvstore + deletionPolicy: Archive # INVALID! Only "Delete" or "Retain" allowed diff --git a/config/samples/validation-tests/database/04-invalid-missing-fields.yaml b/config/samples/validation-tests/database/04-invalid-missing-fields.yaml new file mode 100644 index 000000000..c0376eb21 --- /dev/null +++ b/config/samples/validation-tests/database/04-invalid-missing-fields.yaml @@ -0,0 +1,11 @@ +apiVersion: enterprise.splunk.com/v4 +kind: PostgresDatabase +metadata: + name: test-missing-fields + namespace: default +spec: + # Missing clusterRef - REQUIRED field! + databases: + - name: kvstore + - extensions: # Missing name - REQUIRED in DatabaseDefinition! 
+ - pg_stat_statements diff --git a/go.mod b/go.mod index 3615f95ed..2f9b00cc6 100644 --- a/go.mod +++ b/go.mod @@ -13,26 +13,30 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.85 github.com/aws/aws-sdk-go-v2/service/s3 v1.84.1 github.com/aws/aws-sdk-go-v2/service/sqs v1.42.21 + github.com/cloudnative-pg/cloudnative-pg v1.28.0 github.com/go-logr/logr v1.4.3 github.com/google/go-cmp v0.7.0 github.com/google/uuid v1.6.0 + github.com/jackc/pgx/v5 v5.8.0 github.com/joho/godotenv v1.5.1 github.com/minio/minio-go/v7 v7.0.16 github.com/onsi/ginkgo v1.16.5 github.com/onsi/ginkgo/v2 v2.28.1 github.com/onsi/gomega v1.39.1 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_golang v1.23.2 + github.com/sethvargo/go-password v0.3.1 github.com/stretchr/testify v1.11.1 github.com/wk8/go-ordered-map/v2 v2.1.7 - go.uber.org/zap v1.27.0 + go.uber.org/zap v1.27.1 google.golang.org/api v0.155.0 - k8s.io/api v0.33.0 - k8s.io/apiextensions-apiserver v0.33.0 - k8s.io/apimachinery v0.33.0 - k8s.io/client-go v0.33.0 + k8s.io/api v0.34.2 + k8s.io/apiextensions-apiserver v0.34.2 + k8s.io/apimachinery v0.34.2 + k8s.io/client-go v0.34.2 k8s.io/kubectl v0.26.2 - sigs.k8s.io/controller-runtime v0.21.0 + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 + sigs.k8s.io/controller-runtime v0.22.4 ) require ( @@ -64,26 +68,40 @@ require ( github.com/buger/jsonparser v1.1.1 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudnative-pg/barman-cloud v0.3.3 // indirect + github.com/cloudnative-pg/cnpg-i v0.3.0 // indirect + github.com/cloudnative-pg/machinery v0.3.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/emicklei/go-restful/v3 v3.13.0 // indirect github.com/evanphx/json-patch/v5 v5.9.11 // 
indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/jsonpointer v0.22.0 // indirect + github.com/go-openapi/jsonreference v0.21.1 // indirect + github.com/go-openapi/swag v0.24.1 // indirect + github.com/go-openapi/swag/cmdutils v0.24.0 // indirect + github.com/go-openapi/swag/conv v0.24.0 // indirect + github.com/go-openapi/swag/fileutils v0.24.0 // indirect + github.com/go-openapi/swag/jsonname v0.24.0 // indirect + github.com/go-openapi/swag/jsonutils v0.24.0 // indirect + github.com/go-openapi/swag/loading v0.24.0 // indirect + github.com/go-openapi/swag/mangling v0.24.0 // indirect + github.com/go-openapi/swag/netutils v0.24.0 // indirect + github.com/go-openapi/swag/stringutils v0.24.0 // indirect + github.com/go-openapi/swag/typeutils v0.24.0 // indirect + github.com/go-openapi/swag/yamlutils v0.24.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.23.2 // indirect - github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/cel-go v0.26.0 // indirect + github.com/google/gnostic-models v0.7.0 // indirect github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // 
indirect @@ -91,35 +109,40 @@ require ( github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/cpuid v1.3.1 // indirect + github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/mailru/easyjson v0.7.7 // indirect + github.com/lib/pq v1.10.9 // indirect + github.com/mailru/easyjson v0.9.0 // indirect github.com/minio/md5-simd v1.1.0 // indirect github.com/minio/sha256-simd v0.1.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.2 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.16.1 // indirect github.com/rs/xid v1.2.1 // indirect github.com/sirupsen/logrus v1.9.3 
// indirect - github.com/spf13/cobra v1.8.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/cobra v1.10.1 // indirect + github.com/spf13/pflag v1.0.10 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/x448/float16 v0.8.4 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect go.opentelemetry.io/otel v1.40.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 // indirect @@ -129,6 +152,7 @@ require ( go.opentelemetry.io/otel/trace v1.40.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.47.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect @@ -139,7 +163,7 @@ require ( golang.org/x/sys v0.40.0 // indirect golang.org/x/term v0.39.0 // indirect golang.org/x/text v0.33.0 // indirect - golang.org/x/time v0.9.0 // indirect + golang.org/x/time v0.12.0 // indirect golang.org/x/tools v0.41.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect @@ -147,18 +171,17 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20260128011058-8636f8732409 // indirect google.golang.org/grpc v1.78.0 // indirect google.golang.org/protobuf v1.36.11 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.33.0 // 
indirect - k8s.io/component-base v0.33.0 // indirect + k8s.io/apiserver v0.34.2 // indirect + k8s.io/component-base v0.34.2 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect - k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + k8s.io/kube-openapi v0.0.0-20250905212525-66792eed8611 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect - sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect - sigs.k8s.io/yaml v1.4.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) diff --git a/go.sum b/go.sum index f4c6dae6b..9ec3df9aa 100644 --- a/go.sum +++ b/go.sum @@ -82,11 +82,18 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudnative-pg/barman-cloud v0.3.3 h1:EEcjeV+IUivDpmyF/H/XGY1pGaKJ5LS5MYeB6wgGcak= +github.com/cloudnative-pg/barman-cloud v0.3.3/go.mod h1:5CM4MncAxAjnqxjDt0I5E/oVd7gsMLL0/o/wQ+vUSgs= +github.com/cloudnative-pg/cloudnative-pg v1.28.0 h1:vkv0a0ewDSfJOPJrsyUr4uczsxheReAWf/k171V0Dm0= +github.com/cloudnative-pg/cloudnative-pg v1.28.0/go.mod h1:209fkRR6m0vXUVQ9Q498eAPQqN2UlXECbXXtpGsZz3I= +github.com/cloudnative-pg/cnpg-i v0.3.0 h1:5ayNOG5x68lU70IVbHDZQrv5p+bErCJ0mqRmOpW2jjE= +github.com/cloudnative-pg/cnpg-i v0.3.0/go.mod h1:VOIWWXcJ1RyioK+elR2DGOa4cBA6K+6UQgx05aZmH+g= +github.com/cloudnative-pg/machinery v0.3.1 h1:KtPA6EwELTUNisCMLiFYkK83GU9606rkGQhDJGPB8Yw= +github.com/cloudnative-pg/machinery v0.3.1/go.mod 
h1:jebuqKxZAbrRKDEEpVCIDMKW+FbWtB9Kf/hb2kMUu9o= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0= github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -94,8 +101,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes= +github.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -105,18 +112,18 @@ github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= -github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.9.0 
h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs= github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo= github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M= @@ -130,14 +137,34 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= -github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= -github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/jsonpointer v0.22.0 h1:TmMhghgNef9YXxTu1tOopo+0BGEytxA+okbry0HjZsM= +github.com/go-openapi/jsonpointer v0.22.0/go.mod h1:xt3jV88UtExdIkkL7NloURjRQjbeUgcxFblMjq2iaiU= +github.com/go-openapi/jsonreference v0.21.1 h1:bSKrcl8819zKiOgxkbVNRUBIr6Wwj9KYrDbMjRs0cDA= +github.com/go-openapi/jsonreference v0.21.1/go.mod h1:PWs8rO4xxTUqKGu+lEvvCxD5k2X7QYkKAepJyCmSTT8= +github.com/go-openapi/swag v0.24.1 h1:DPdYTZKo6AQCRqzwr/kGkxJzHhpKxZ9i/oX0zag+MF8= +github.com/go-openapi/swag 
v0.24.1/go.mod h1:sm8I3lCPlspsBBwUm1t5oZeWZS0s7m/A+Psg0ooRU0A= +github.com/go-openapi/swag/cmdutils v0.24.0 h1:KlRCffHwXFI6E5MV9n8o8zBRElpY4uK4yWyAMWETo9I= +github.com/go-openapi/swag/cmdutils v0.24.0/go.mod h1:uxib2FAeQMByyHomTlsP8h1TtPd54Msu2ZDU/H5Vuf8= +github.com/go-openapi/swag/conv v0.24.0 h1:ejB9+7yogkWly6pnruRX45D1/6J+ZxRu92YFivx54ik= +github.com/go-openapi/swag/conv v0.24.0/go.mod h1:jbn140mZd7EW2g8a8Y5bwm8/Wy1slLySQQ0ND6DPc2c= +github.com/go-openapi/swag/fileutils v0.24.0 h1:U9pCpqp4RUytnD689Ek/N1d2N/a//XCeqoH508H5oak= +github.com/go-openapi/swag/fileutils v0.24.0/go.mod h1:3SCrCSBHyP1/N+3oErQ1gP+OX1GV2QYFSnrTbzwli90= +github.com/go-openapi/swag/jsonname v0.24.0 h1:2wKS9bgRV/xB8c62Qg16w4AUiIrqqiniJFtZGi3dg5k= +github.com/go-openapi/swag/jsonname v0.24.0/go.mod h1:GXqrPzGJe611P7LG4QB9JKPtUZ7flE4DOVechNaDd7Q= +github.com/go-openapi/swag/jsonutils v0.24.0 h1:F1vE1q4pg1xtO3HTyJYRmEuJ4jmIp2iZ30bzW5XgZts= +github.com/go-openapi/swag/jsonutils v0.24.0/go.mod h1:vBowZtF5Z4DDApIoxcIVfR8v0l9oq5PpYRUuteVu6f0= +github.com/go-openapi/swag/loading v0.24.0 h1:ln/fWTwJp2Zkj5DdaX4JPiddFC5CHQpvaBKycOlceYc= +github.com/go-openapi/swag/loading v0.24.0/go.mod h1:gShCN4woKZYIxPxbfbyHgjXAhO61m88tmjy0lp/LkJk= +github.com/go-openapi/swag/mangling v0.24.0 h1:PGOQpViCOUroIeak/Uj/sjGAq9LADS3mOyjznmHy2pk= +github.com/go-openapi/swag/mangling v0.24.0/go.mod h1:Jm5Go9LHkycsz0wfoaBDkdc4CkpuSnIEf62brzyCbhc= +github.com/go-openapi/swag/netutils v0.24.0 h1:Bz02HRjYv8046Ycg/w80q3g9QCWeIqTvlyOjQPDjD8w= +github.com/go-openapi/swag/netutils v0.24.0/go.mod h1:WRgiHcYTnx+IqfMCtu0hy9oOaPR0HnPbmArSRN1SkZM= +github.com/go-openapi/swag/stringutils v0.24.0 h1:i4Z/Jawf9EvXOLUbT97O0HbPUja18VdBxeadyAqS1FM= +github.com/go-openapi/swag/stringutils v0.24.0/go.mod h1:5nUXB4xA0kw2df5PRipZDslPJgJut+NjL7D25zPZ/4w= +github.com/go-openapi/swag/typeutils v0.24.0 h1:d3szEGzGDf4L2y1gYOSSLeK6h46F+zibnEas2Jm/wIw= +github.com/go-openapi/swag/typeutils v0.24.0/go.mod h1:q8C3Kmk/vh2VhpCLaoR2MVWOGP8y7Jc8l82qCTd1DYI= 
+github.com/go-openapi/swag/yamlutils v0.24.0 h1:bhw4894A7Iw6ne+639hsBNRHg9iZg/ISrOVr+sJGp4c= +github.com/go-openapi/swag/yamlutils v0.24.0/go.mod h1:DpKv5aYuaGm/sULePoeiG8uwMpZSfReo1HR3Ik0yaG8= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -166,17 +193,16 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= -github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= -github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= -github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= +github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= +github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -204,6 +230,14 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fm github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo= +github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -223,17 +257,20 @@ github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo github.com/klauspost/cpuid v1.3.1 
h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0 h1:bMqrb3UHgHbP+PW9VwiejfDJU1R0PpXVZNMdeH8WYKI= +github.com/kubernetes-csi/external-snapshotter/client/v8 v8.4.0/go.mod h1:E3vdYxHj2C2q6qo8/Da4g7P+IcwqRZyy3gJBzYybV9Y= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= @@ -253,8 +290,9 @@ github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= @@ -281,29 +319,34 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= -github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.2 h1:VRXUgbGmpmjZgFYiUnTwlC+JjfCUs5KKFsorJhI1ZKQ= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.2/go.mod 
h1:nPk0OteXBkbT0CRCa2oZQL1jRLW6RJ2fuIijHypeJdk= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sethvargo/go-password v0.3.1 h1:WqrLTjo7X6AcVYfC6R7GtSyuUQR9hGyAj/f1PYQZCJU= 
+github.com/sethvargo/go-password v0.3.1/go.mod h1:rXofC1zT54N7R8K/h1WDUdkf9BOx5OptoxrMBcrXzvs= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -321,6 +364,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/thoas/go-funk v0.9.3 
h1:7+nAEx3kn5ZJcnDm2Bh23N2yOtweO14bi//dvRtgLpw= +github.com/thoas/go-funk v0.9.3/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -339,8 +384,8 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= @@ -363,8 +408,10 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.27.0 
h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= -go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc= +go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -430,8 +477,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= +golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -489,8 +536,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -508,36 +555,35 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= -k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= -k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs= -k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= -k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ= -k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc= -k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8= -k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= -k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= -k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk= -k8s.io/component-base 
v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU= +k8s.io/api v0.34.2 h1:fsSUNZhV+bnL6Aqrp6O7lMTy6o5x2C4XLjnh//8SLYY= +k8s.io/api v0.34.2/go.mod h1:MMBPaWlED2a8w4RSeanD76f7opUoypY8TFYkSM+3XHw= +k8s.io/apiextensions-apiserver v0.34.2 h1:WStKftnGeoKP4AZRz/BaAAEJvYp4mlZGN0UCv+uvsqo= +k8s.io/apiextensions-apiserver v0.34.2/go.mod h1:398CJrsgXF1wytdaanynDpJ67zG4Xq7yj91GrmYN2SE= +k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= +k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/apiserver v0.34.2 h1:2/yu8suwkmES7IzwlehAovo8dDE07cFRC7KMDb1+MAE= +k8s.io/apiserver v0.34.2/go.mod h1:gqJQy2yDOB50R3JUReHSFr+cwJnL8G1dzTA0YLEqAPI= +k8s.io/client-go v0.34.2 h1:Co6XiknN+uUZqiddlfAjT68184/37PS4QAzYvQvDR8M= +k8s.io/client-go v0.34.2/go.mod h1:2VYDl1XXJsdcAxw7BenFslRQX28Dxz91U9MWKjX97fE= +k8s.io/component-base v0.34.2 h1:HQRqK9x2sSAsd8+R4xxRirlTjowsg6fWCPwWYeSvogQ= +k8s.io/component-base v0.34.2/go.mod h1:9xw2FHJavUHBFpiGkZoKuYZ5pdtLKe97DEByaA+hHbM= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= -k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/kube-openapi v0.0.0-20250905212525-66792eed8611 h1:o4oKOsvSymDkZRsMAPZU7bRdwL+lPOK5VS10Dr1D6eg= +k8s.io/kube-openapi v0.0.0-20250905212525-66792eed8611/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= k8s.io/kubectl v0.26.2 h1:SMPB4j48eVFxsYluBq3VLyqXtE6b72YnszkbTAtFye4= k8s.io/kubectl v0.26.2/go.mod h1:KYWOXSwp2BrDn3kPeoU/uKzKtdqvhK1dgZGd0+no4cM= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils 
v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= -sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= -sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= +sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= -sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= -sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod 
h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/internal/controller/postgrescluster_controller.go b/internal/controller/postgrescluster_controller.go new file mode 100644 index 000000000..dfa1f7eaf --- /dev/null +++ b/internal/controller/postgrescluster_controller.go @@ -0,0 +1,179 @@ +/* +Copyright 2026. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + + cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + enterprisev4 "github.com/splunk/splunk-operator/api/v4" + clustercore "github.com/splunk/splunk-operator/pkg/postgresql/cluster/core" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +const ( + ClusterTotalWorker int = 2 +) + +// PostgresClusterReconciler reconciles PostgresCluster resources. 
+type PostgresClusterReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresclusters/finalizers,verbs=update +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresclusterclasses,verbs=get;list;watch +// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=clusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=clusters/status,verbs=get +// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=poolers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=postgresql.cnpg.io,resources=poolers/status,verbs=get + +func (r *PostgresClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + return clustercore.PostgresClusterService(ctx, r.Client, r.Scheme, req) +} + +// SetupWithManager registers the controller and owned resource watches. +func (r *PostgresClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&enterprisev4.PostgresCluster{}, builder.WithPredicates(postgresClusterPredicator())). + Owns(&cnpgv1.Cluster{}, builder.WithPredicates(cnpgClusterPredicator())). + Owns(&cnpgv1.Pooler{}, builder.WithPredicates(cnpgPoolerPredicator())). + Owns(&corev1.Secret{}, builder.WithPredicates(secretPredicator())). + Owns(&corev1.ConfigMap{}, builder.WithPredicates(configMapPredicator())). + Named("postgresCluster"). + WithOptions(controller.Options{ + MaxConcurrentReconciles: ClusterTotalWorker, + }). 
+ Complete(r) +} + +func deletionTimestampChanged(oldObj, newObj metav1.Object) bool { + return !equality.Semantic.DeepEqual(oldObj.GetDeletionTimestamp(), newObj.GetDeletionTimestamp()) +} + +func ownerReferencesChanged(oldObj, newObj metav1.Object) bool { + return !equality.Semantic.DeepEqual(oldObj.GetOwnerReferences(), newObj.GetOwnerReferences()) +} + +// postgresClusterPredicator triggers on generation changes, deletion, and finalizer transitions. +func postgresClusterPredicator() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(event.CreateEvent) bool { return true }, + DeleteFunc: func(event.DeleteEvent) bool { return true }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldObj, oldOK := e.ObjectOld.(*enterprisev4.PostgresCluster) + newObj, newOK := e.ObjectNew.(*enterprisev4.PostgresCluster) + if !oldOK || !newOK { + return true + } + if oldObj.Generation != newObj.Generation { + return true + } + if deletionTimestampChanged(oldObj, newObj) { + return true + } + // Finalizer changes indicate registration or deletion  always reconcile. + return controllerutil.ContainsFinalizer(oldObj, clustercore.PostgresClusterFinalizerName) != + controllerutil.ContainsFinalizer(newObj, clustercore.PostgresClusterFinalizerName) + }, + GenericFunc: func(event.GenericEvent) bool { return false }, + } +} + +// cnpgClusterPredicator triggers only on phase changes or owner reference changes. 
+func cnpgClusterPredicator() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(event.CreateEvent) bool { return true }, + DeleteFunc: func(event.DeleteEvent) bool { return true }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldObj, oldOK := e.ObjectOld.(*cnpgv1.Cluster) + newObj, newOK := e.ObjectNew.(*cnpgv1.Cluster) + if !oldOK || !newOK { + return true + } + return oldObj.Status.Phase != newObj.Status.Phase || + ownerReferencesChanged(oldObj, newObj) + }, + GenericFunc: func(event.GenericEvent) bool { return false }, + } +} + +// cnpgPoolerPredicator triggers only on instance count changes. +func cnpgPoolerPredicator() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(event.CreateEvent) bool { return true }, + DeleteFunc: func(event.DeleteEvent) bool { return true }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldObj, oldOK := e.ObjectOld.(*cnpgv1.Pooler) + newObj, newOK := e.ObjectNew.(*cnpgv1.Pooler) + if !oldOK || !newOK { + return true + } + return oldObj.Status.Instances != newObj.Status.Instances + }, + GenericFunc: func(event.GenericEvent) bool { return false }, + } +} + +// secretPredicator triggers only on owner reference changes. +func secretPredicator() predicate.Predicate { + return predicate.Funcs{ + CreateFunc: func(event.CreateEvent) bool { return true }, + DeleteFunc: func(event.DeleteEvent) bool { return true }, + UpdateFunc: func(e event.UpdateEvent) bool { + oldObj, oldOK := e.ObjectOld.(*corev1.Secret) + newObj, newOK := e.ObjectNew.(*corev1.Secret) + if !oldOK || !newOK { + return true + } + return ownerReferencesChanged(oldObj, newObj) + }, + GenericFunc: func(event.GenericEvent) bool { return false }, + } +} + +// configMapPredicator triggers on data, label, annotation, or owner reference changes. 
func configMapPredicator() predicate.Predicate {
	return predicate.Funcs{
		CreateFunc: func(event.CreateEvent) bool { return true },
		DeleteFunc: func(event.DeleteEvent) bool { return true },
		UpdateFunc: func(e event.UpdateEvent) bool {
			oldObj, oldOK := e.ObjectOld.(*corev1.ConfigMap)
			newObj, newOK := e.ObjectNew.(*corev1.ConfigMap)
			if !oldOK || !newOK {
				// Unexpected object type: err on the side of reconciling.
				return true
			}
			// Reconcile when payload or identifying metadata drifts.
			return !equality.Semantic.DeepEqual(oldObj.Data, newObj.Data) ||
				!equality.Semantic.DeepEqual(oldObj.Labels, newObj.Labels) ||
				!equality.Semantic.DeepEqual(oldObj.Annotations, newObj.Annotations) ||
				ownerReferencesChanged(oldObj, newObj)
		},
		GenericFunc: func(event.GenericEvent) bool { return false },
	}
}
diff --git a/internal/controller/postgrescluster_controller_test.go b/internal/controller/postgrescluster_controller_test.go
new file mode 100644
index 000000000..c0f3493d9
--- /dev/null
+++ b/internal/controller/postgrescluster_controller_test.go
@@ -0,0 +1,84 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	.
	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	enterprisev4 "github.com/splunk/splunk-operator/api/v4"
)

// Scaffolded envtest suite: creates a bare PostgresCluster, runs one
// reconcile pass against it, and cleans up afterwards.
var _ = Describe("PostgresCluster Controller", func() {
	Context("When reconciling a resource", func() {
		const resourceName = "test-resource"

		ctx := context.Background()

		typeNamespacedName := types.NamespacedName{
			Name:      resourceName,
			Namespace: "default", // TODO(user): Modify as needed
		}
		postgresCluster := &enterprisev4.PostgresCluster{}

		BeforeEach(func() {
			By("creating the custom resource for the Kind PostgresCluster")
			// Only create when absent; a non-NotFound Get error is ignored here
			// (scaffold behavior) and will surface in AfterEach instead.
			err := k8sClient.Get(ctx, typeNamespacedName, postgresCluster)
			if err != nil && errors.IsNotFound(err) {
				resource := &enterprisev4.PostgresCluster{
					ObjectMeta: metav1.ObjectMeta{
						Name:      resourceName,
						Namespace: "default",
					},
					// TODO(user): Specify other spec details if needed.
				}
				Expect(k8sClient.Create(ctx, resource)).To(Succeed())
			}
		})

		AfterEach(func() {
			// TODO(user): Cleanup logic after each test, like removing the resource instance.
			resource := &enterprisev4.PostgresCluster{}
			err := k8sClient.Get(ctx, typeNamespacedName, resource)
			Expect(err).NotTo(HaveOccurred())

			By("Cleanup the specific resource instance PostgresCluster")
			Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
		})
		It("should successfully reconcile the resource", func() {
			By("Reconciling the created resource")
			controllerReconciler := &PostgresClusterReconciler{
				Client: k8sClient,
				Scheme: k8sClient.Scheme(),
			}

			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
				NamespacedName: typeNamespacedName,
			})
			Expect(err).NotTo(HaveOccurred())
			// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
			// Example: If you expect a certain status condition after reconciliation, verify it here.
		})
	})
})
diff --git a/internal/controller/postgresdatabase_controller.go b/internal/controller/postgresdatabase_controller.go
new file mode 100644
index 000000000..40faa3eb3
--- /dev/null
+++ b/internal/controller/postgresdatabase_controller.go
@@ -0,0 +1,115 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"
	"reflect"

	cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	enterprisev4 "github.com/splunk/splunk-operator/api/v4"
	dbadapter "github.com/splunk/splunk-operator/pkg/postgresql/database/adapter"
	dbcore "github.com/splunk/splunk-operator/pkg/postgresql/database/core"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// PostgresDatabaseReconciler reconciles a PostgresDatabase object.
type PostgresDatabaseReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

const (
	// DatabaseTotalWorker bounds concurrent reconciles for this controller.
	DatabaseTotalWorker int = 2
)

//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresdatabases,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresdatabases/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresdatabases/finalizers,verbs=update
//+kubebuilder:rbac:groups=enterprise.splunk.com,resources=postgresclusters,verbs=get;list;watch
//+kubebuilder:rbac:groups=postgresql.cnpg.io,resources=clusters,verbs=get;list;watch;patch
//+kubebuilder:rbac:groups=postgresql.cnpg.io,resources=databases,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;delete
//+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;delete

// Reconcile fetches the PostgresDatabase for the request and delegates the
// actual work to the domain service in pkg/postgresql/database/core.
// A missing object is treated as already-deleted and not retried.
func (r *PostgresDatabaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := log.FromContext(ctx)

	postgresDB := &enterprisev4.PostgresDatabase{}
	if err := r.Get(ctx, req.NamespacedName, postgresDB); err != nil {
		if apierrors.IsNotFound(err) {
			logger.Info("PostgresDatabase resource not found, ignoring")
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, err
	}
	// NewDBRepository is injected so the core service can be tested with a fake.
	return dbcore.PostgresDatabaseService(ctx, r.Client, r.Scheme, postgresDB, dbadapter.NewDBRepository)
}

// SetupWithManager sets up the controller with the Manager.
func (r *PostgresDatabaseReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// Index CNPG Database objects by their controlling PostgresDatabase so
	// they can be listed efficiently with a field selector at reconcile time.
	if err := mgr.GetFieldIndexer().IndexField(
		context.Background(),
		&cnpgv1.Database{},
		".metadata.controller",
		func(obj client.Object) []string {
			owner := metav1.GetControllerOf(obj)
			if owner == nil {
				return nil
			}
			// Only index objects controlled by our PostgresDatabase kind.
			if owner.APIVersion != enterprisev4.GroupVersion.String() || owner.Kind != "PostgresDatabase" {
				return nil
			}
			return []string{owner.Name}
		},
	); err != nil {
		return err
	}
	return ctrl.NewControllerManagedBy(mgr).
		For(&enterprisev4.PostgresDatabase{}, builder.WithPredicates(
			// Reconcile on spec (generation) changes or finalizer transitions.
			predicate.Or(
				predicate.GenerationChangedPredicate{},
				predicate.Funcs{
					UpdateFunc: func(e event.UpdateEvent) bool {
						return !reflect.DeepEqual(
							e.ObjectOld.GetFinalizers(),
							e.ObjectNew.GetFinalizers(),
						)
					},
				},
			),
		)).
		Owns(&cnpgv1.Database{}).
		Owns(&corev1.Secret{}).
		Owns(&corev1.ConfigMap{}).
		Named("postgresdatabase").
		WithOptions(controller.Options{
			MaxConcurrentReconciles: DatabaseTotalWorker,
		}).
		Complete(r)
}
diff --git a/internal/controller/postgresdatabase_controller_test.go b/internal/controller/postgresdatabase_controller_test.go
new file mode 100644
index 000000000..4e0589cad
--- /dev/null
+++ b/internal/controller/postgresdatabase_controller_test.go
@@ -0,0 +1,84 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
	"context"

	. "github.com/onsi/ginkgo/v2"
	.
	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	enterprisev4 "github.com/splunk/splunk-operator/api/v4"
)

// Scaffolded envtest suite: creates a bare PostgresDatabase, runs one
// reconcile pass against it, and cleans up afterwards.
var _ = Describe("Database Controller", func() {
	Context("When reconciling a resource", func() {
		const resourceName = "test-resource"

		ctx := context.Background()

		typeNamespacedName := types.NamespacedName{
			Name:      resourceName,
			Namespace: "default", // TODO(user): Modify as needed
		}
		database := &enterprisev4.PostgresDatabase{}

		BeforeEach(func() {
			By("creating the custom resource for the Kind Database")
			// Only create when absent; a non-NotFound Get error is ignored here
			// (scaffold behavior) and will surface in AfterEach instead.
			err := k8sClient.Get(ctx, typeNamespacedName, database)
			if err != nil && errors.IsNotFound(err) {
				resource := &enterprisev4.PostgresDatabase{
					ObjectMeta: metav1.ObjectMeta{
						Name:      resourceName,
						Namespace: "default",
					},
					// TODO(user): Specify other spec details if needed.
				}
				Expect(k8sClient.Create(ctx, resource)).To(Succeed())
			}
		})

		AfterEach(func() {
			// TODO(user): Cleanup logic after each test, like removing the resource instance.
			resource := &enterprisev4.PostgresDatabase{}
			err := k8sClient.Get(ctx, typeNamespacedName, resource)
			Expect(err).NotTo(HaveOccurred())

			By("Cleanup the specific resource instance Database")
			Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
		})
		It("should successfully reconcile the resource", func() {
			By("Reconciling the created resource")
			controllerReconciler := &PostgresDatabaseReconciler{
				Client: k8sClient,
				Scheme: k8sClient.Scheme(),
			}

			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
				NamespacedName: typeNamespacedName,
			})
			Expect(err).NotTo(HaveOccurred())
			// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
			// Example: If you expect a certain status condition after reconciliation, verify it here.
		})
	})
})
diff --git a/pkg/postgresql/cluster/core/cluster.go b/pkg/postgresql/cluster/core/cluster.go
new file mode 100644
index 000000000..3459101a4
--- /dev/null
+++ b/pkg/postgresql/cluster/core/cluster.go
@@ -0,0 +1,1015 @@
/*
Copyright 2026.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package core

import (
	"context"
	"fmt"

	cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	password "github.com/sethvargo/go-password/password"
	enterprisev4 "github.com/splunk/splunk-operator/api/v4"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/ptr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	log "sigs.k8s.io/controller-runtime/pkg/log"
)

// PostgresClusterService is the application service entry point called by the primary adapter (reconciler).
+func PostgresClusterService(ctx context.Context, c client.Client, scheme *runtime.Scheme, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + logger.Info("Reconciling PostgresCluster", "name", req.Name, "namespace", req.Namespace) + + var cnpgCluster *cnpgv1.Cluster + var poolerEnabled bool + var postgresSecretName string + secret := &corev1.Secret{} + + // 1. Fetch the PostgresCluster instance, stop if not found. + postgresCluster := &enterprisev4.PostgresCluster{} + if err := c.Get(ctx, req.NamespacedName, postgresCluster); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("PostgresCluster deleted, skipping reconciliation") + return ctrl.Result{}, nil + } + logger.Error(err, "Unable to fetch PostgresCluster") + return ctrl.Result{}, err + } + if postgresCluster.Status.Resources == nil { + postgresCluster.Status.Resources = &enterprisev4.PostgresClusterResources{} + } + + updateStatus := func(conditionType conditionTypes, status metav1.ConditionStatus, reason conditionReasons, message string, phase reconcileClusterPhases) error { + return setStatus(ctx, c, postgresCluster, conditionType, status, reason, message, phase) + } + + // Finalizer handling must come before any other processing. + if err := handleFinalizer(ctx, c, scheme, postgresCluster, secret); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("PostgresCluster already deleted, skipping finalizer update") + return ctrl.Result{}, nil + } + logger.Error(err, "Failed to handle finalizer") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterDeleteFailed, + fmt.Sprintf("Failed to delete resources during cleanup: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + if postgresCluster.GetDeletionTimestamp() != nil { + logger.Info("PostgresCluster is being deleted, cleanup complete") + return ctrl.Result{}, nil + } + + // Add finalizer if not present. 
+ if !controllerutil.ContainsFinalizer(postgresCluster, PostgresClusterFinalizerName) { + controllerutil.AddFinalizer(postgresCluster, PostgresClusterFinalizerName) + if err := c.Update(ctx, postgresCluster); err != nil { + if apierrors.IsConflict(err) { + logger.Info("Conflict while adding finalizer, will retry on next reconcile") + return ctrl.Result{Requeue: true}, nil + } + logger.Error(err, "Failed to add finalizer to PostgresCluster") + return ctrl.Result{}, fmt.Errorf("failed to add finalizer: %w", err) + } + logger.Info("Finalizer added successfully") + return ctrl.Result{}, nil + } + + // 2. Load the referenced PostgresClusterClass. + clusterClass := &enterprisev4.PostgresClusterClass{} + if err := c.Get(ctx, client.ObjectKey{Name: postgresCluster.Spec.Class}, clusterClass); err != nil { + logger.Error(err, "Unable to fetch referenced PostgresClusterClass", "className", postgresCluster.Spec.Class) + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterClassNotFound, + fmt.Sprintf("ClusterClass %s not found: %v", postgresCluster.Spec.Class, err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + + // 3. Merge PostgresClusterSpec on top of PostgresClusterClass defaults. + mergedConfig, err := getMergedConfig(clusterClass, postgresCluster) + if err != nil { + logger.Error(err, "Failed to merge PostgresCluster configuration") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonInvalidConfiguration, + fmt.Sprintf("Failed to merge configuration: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + + // 4. Resolve or derive the superuser secret name. 
+ if postgresCluster.Status.Resources != nil && postgresCluster.Status.Resources.SuperUserSecretRef != nil { + postgresSecretName = postgresCluster.Status.Resources.SuperUserSecretRef.Name + logger.Info("Using existing secret from status", "name", postgresSecretName) + } else { + postgresSecretName = fmt.Sprintf("%s%s", postgresCluster.Name, defaultSecretSuffix) + logger.Info("Generating new secret name", "name", postgresSecretName) + } + + secretExists, secretErr := clusterSecretExists(ctx, c, postgresCluster.Namespace, postgresSecretName, secret) + if secretErr != nil { + logger.Error(secretErr, "Failed to check if PostgresCluster secret exists", "name", postgresSecretName) + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonUserSecretFailed, + fmt.Sprintf("Failed to check secret existence: %v", secretErr), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, secretErr + } + if !secretExists { + logger.Info("Creating PostgresCluster secret", "name", postgresSecretName) + if err := ensureClusterSecret(ctx, c, scheme, postgresCluster, postgresSecretName, secret); err != nil { + logger.Error(err, "Failed to ensure PostgresCluster secret", "name", postgresSecretName) + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonUserSecretFailed, + fmt.Sprintf("Failed to generate PostgresCluster secret: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + if err := c.Status().Update(ctx, postgresCluster); err != nil { + if apierrors.IsConflict(err) { + logger.Info("Conflict after secret creation, will requeue") + return ctrl.Result{Requeue: true}, nil + } + logger.Error(err, "Failed to update status after secret creation") + return ctrl.Result{}, err + } + logger.Info("SuperUserSecretRef persisted to status") + } + + // Re-attach ownerRef if it was stripped (e.g. 
by a Retain-policy deletion of a previous cluster). + hasOwnerRef, ownerRefErr := controllerutil.HasOwnerReference(secret.GetOwnerReferences(), postgresCluster, scheme) + if ownerRefErr != nil { + logger.Error(ownerRefErr, "Failed to check owner reference on Secret") + return ctrl.Result{}, fmt.Errorf("failed to check owner reference on secret: %w", ownerRefErr) + } + if secretExists && !hasOwnerRef { + logger.Info("Connecting existing secret to PostgresCluster by adding owner reference", "name", postgresSecretName) + originalSecret := secret.DeepCopy() + if err := ctrl.SetControllerReference(postgresCluster, secret, scheme); err != nil { + return ctrl.Result{}, fmt.Errorf("failed to set controller reference on existing secret: %w", err) + } + if err := patchObject(ctx, c, originalSecret, secret, "Secret"); err != nil { + logger.Error(err, "Failed to patch existing secret with controller reference") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonSuperUserSecretFailed, + fmt.Sprintf("Failed to patch existing secret: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + logger.Info("Existing secret linked successfully") + } + + if postgresCluster.Status.Resources.SuperUserSecretRef == nil { + postgresCluster.Status.Resources.SuperUserSecretRef = &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: postgresSecretName}, + Key: secretKeyPassword, + } + } + + // 5. Build desired CNPG Cluster spec. + desiredSpec := buildCNPGClusterSpec(mergedConfig, postgresSecretName) + + // 6. Fetch existing CNPG Cluster or create it. 
+ existingCNPG := &cnpgv1.Cluster{} + err = c.Get(ctx, types.NamespacedName{Name: postgresCluster.Name, Namespace: postgresCluster.Namespace}, existingCNPG) + switch { + case apierrors.IsNotFound(err): + logger.Info("CNPG Cluster not found, creating", "name", postgresCluster.Name) + newCluster := buildCNPGCluster(scheme, postgresCluster, mergedConfig, postgresSecretName) + if err := c.Create(ctx, newCluster); err != nil { + logger.Error(err, "Failed to create CNPG Cluster") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterBuildFailed, + fmt.Sprintf("Failed to create CNPG Cluster: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterBuildSucceeded, + "CNPG Cluster created", pendingClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + logger.Info("CNPG Cluster created successfully, requeueing for status update", "name", postgresCluster.Name) + return ctrl.Result{RequeueAfter: retryDelay}, nil + case err != nil: + logger.Error(err, "Failed to get CNPG Cluster") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterGetFailed, + fmt.Sprintf("Failed to get CNPG Cluster: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + + // 7. Patch CNPG Cluster spec if drift detected. 
+ cnpgCluster = existingCNPG + currentNormalized := normalizeCNPGClusterSpec(cnpgCluster.Spec, mergedConfig.Spec.PostgreSQLConfig) + desiredNormalized := normalizeCNPGClusterSpec(desiredSpec, mergedConfig.Spec.PostgreSQLConfig) + + if !equality.Semantic.DeepEqual(currentNormalized, desiredNormalized) { + logger.Info("Detected drift in CNPG Cluster spec, patching", "name", cnpgCluster.Name) + originalCluster := cnpgCluster.DeepCopy() + cnpgCluster.Spec = desiredSpec + + switch patchErr := patchObject(ctx, c, originalCluster, cnpgCluster, "CNPGCluster"); { + case apierrors.IsConflict(patchErr): + logger.Info("Conflict occurred while updating CNPG Cluster, requeueing", "name", cnpgCluster.Name) + return ctrl.Result{Requeue: true}, nil + case patchErr != nil: + logger.Error(patchErr, "Failed to patch CNPG Cluster", "name", cnpgCluster.Name) + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterPatchFailed, + fmt.Sprintf("Failed to patch CNPG Cluster: %v", patchErr), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, patchErr + default: + logger.Info("CNPG Cluster patched successfully, requeueing for status update", "name", cnpgCluster.Name) + return ctrl.Result{RequeueAfter: retryDelay}, nil + } + } + + // 7a. Reconcile ManagedRoles. + if err := reconcileManagedRoles(ctx, c, postgresCluster, cnpgCluster); err != nil { + logger.Error(err, "Failed to reconcile managed roles") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonManagedRolesFailed, + fmt.Sprintf("Failed to reconcile managed roles: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + + // 7b. Reconcile Connection Pooler. 
+ poolerEnabled = mergedConfig.Spec.ConnectionPoolerEnabled != nil && *mergedConfig.Spec.ConnectionPoolerEnabled + switch { + case !poolerEnabled: + if err := deleteConnectionPoolers(ctx, c, postgresCluster); err != nil { + logger.Error(err, "Failed to delete connection poolers") + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonPoolerReconciliationFailed, + fmt.Sprintf("Failed to delete connection poolers: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + postgresCluster.Status.ConnectionPoolerStatus = nil + meta.RemoveStatusCondition(&postgresCluster.Status.Conditions, string(poolerReady)) + + case !poolerExists(ctx, c, postgresCluster, readWriteEndpoint) || !poolerExists(ctx, c, postgresCluster, readOnlyEndpoint): + if mergedConfig.CNPG == nil || mergedConfig.CNPG.ConnectionPooler == nil { + logger.Info("Connection pooler enabled but no config found in class or cluster spec, skipping", + "class", postgresCluster.Spec.Class, "cluster", postgresCluster.Name) + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonPoolerConfigMissing, + fmt.Sprintf("Connection pooler is enabled but no config found in class %q or cluster %q", + postgresCluster.Spec.Class, postgresCluster.Name), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, nil + } + if cnpgCluster.Status.Phase != cnpgv1.PhaseHealthy { + logger.Info("CNPG Cluster not healthy yet, pending pooler creation", "clusterPhase", cnpgCluster.Status.Phase) + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonCNPGClusterNotHealthy, + "Waiting for CNPG cluster to become healthy before creating poolers", pendingClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{RequeueAfter: retryDelay}, nil + } + if err := createOrUpdateConnectionPoolers(ctx, c, 
scheme, postgresCluster, mergedConfig, cnpgCluster); err != nil { + logger.Error(err, "Failed to reconcile connection pooler") + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonPoolerReconciliationFailed, + fmt.Sprintf("Failed to reconcile connection pooler: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + logger.Info("Connection Poolers created, requeueing to check readiness") + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonPoolerCreating, + "Connection poolers are being provisioned", provisioningClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{RequeueAfter: retryDelay}, nil + + case func() bool { + rwPooler := &cnpgv1.Pooler{} + rwErr := c.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(postgresCluster.Name, readWriteEndpoint), + Namespace: postgresCluster.Namespace, + }, rwPooler) + roPooler := &cnpgv1.Pooler{} + roErr := c.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(postgresCluster.Name, readOnlyEndpoint), + Namespace: postgresCluster.Namespace, + }, roPooler) + return rwErr != nil || roErr != nil || !arePoolersReady(rwPooler, roPooler) + }(): + logger.Info("Connection Poolers are not ready yet, requeueing") + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonPoolerCreating, + "Connection poolers are being provisioned", pendingClusterPhase); statusErr != nil { + if apierrors.IsConflict(statusErr) { + logger.Info("Conflict updating pooler status, will requeue") + return ctrl.Result{Requeue: true}, nil + } + } + return ctrl.Result{RequeueAfter: retryDelay}, nil + + default: + if err := syncPoolerStatus(ctx, c, postgresCluster); err != nil { + logger.Error(err, "Failed to sync pooler status") + if statusErr := updateStatus(poolerReady, metav1.ConditionFalse, reasonPoolerReconciliationFailed, + fmt.Sprintf("Failed to 
sync pooler status: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + } + + // 8. Reconcile ConfigMap when CNPG cluster is healthy. + if cnpgCluster.Status.Phase == cnpgv1.PhaseHealthy { + logger.Info("CNPG Cluster is ready, reconciling ConfigMap for connection details") + desiredCM, err := generateConfigMap(ctx, c, scheme, postgresCluster, cnpgCluster, postgresSecretName) + if err != nil { + logger.Error(err, "Failed to generate ConfigMap") + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonConfigMapFailed, + fmt.Sprintf("Failed to generate ConfigMap: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: desiredCM.Name, Namespace: desiredCM.Namespace}} + createOrUpdateResult, err := controllerutil.CreateOrUpdate(ctx, c, cm, func() error { + cm.Data = desiredCM.Data + cm.Annotations = desiredCM.Annotations + cm.Labels = desiredCM.Labels + if !metav1.IsControlledBy(cm, postgresCluster) { + if err := ctrl.SetControllerReference(postgresCluster, cm, scheme); err != nil { + return fmt.Errorf("set controller reference failed: %w", err) + } + } + return nil + }) + if err != nil { + logger.Error(err, "Failed to reconcile ConfigMap", "name", desiredCM.Name) + if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonConfigMapFailed, + fmt.Sprintf("Failed to reconcile ConfigMap: %v", err), failedClusterPhase); statusErr != nil { + logger.Error(statusErr, "Failed to update status") + } + return ctrl.Result{}, err + } + switch createOrUpdateResult { + case controllerutil.OperationResultCreated: + logger.Info("ConfigMap created", "name", desiredCM.Name) + case controllerutil.OperationResultUpdated: + logger.Info("ConfigMap updated", "name", desiredCM.Name) + default: + logger.Info("ConfigMap unchanged", 
"name", desiredCM.Name) + } + if postgresCluster.Status.Resources.ConfigMapRef == nil { + postgresCluster.Status.Resources.ConfigMapRef = &corev1.LocalObjectReference{Name: desiredCM.Name} + } + } + + // 9. Final status sync. + if err := syncStatus(ctx, c, postgresCluster, cnpgCluster); err != nil { + logger.Error(err, "Failed to sync status") + if apierrors.IsConflict(err) { + logger.Info("Conflict during status update, will requeue") + return ctrl.Result{Requeue: true}, nil + } + return ctrl.Result{}, fmt.Errorf("failed to sync status: %w", err) + } + if cnpgCluster.Status.Phase == cnpgv1.PhaseHealthy { + rwPooler := &cnpgv1.Pooler{} + rwErr := c.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(postgresCluster.Name, readWriteEndpoint), + Namespace: postgresCluster.Namespace, + }, rwPooler) + roPooler := &cnpgv1.Pooler{} + roErr := c.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(postgresCluster.Name, readOnlyEndpoint), + Namespace: postgresCluster.Namespace, + }, roPooler) + if rwErr == nil && roErr == nil && arePoolersReady(rwPooler, roPooler) { + logger.Info("Poolers are ready, syncing pooler status") + _ = syncPoolerStatus(ctx, c, postgresCluster) + } + } + logger.Info("Reconciliation complete") + return ctrl.Result{}, nil +} + +// getMergedConfig overlays PostgresCluster spec on top of the class defaults. +// Class values are used only where the cluster spec is silent. +func getMergedConfig(class *enterprisev4.PostgresClusterClass, cluster *enterprisev4.PostgresCluster) (*MergedConfig, error) { + result := cluster.Spec.DeepCopy() + + // Config is optional on the class — apply defaults only when provided. 
+	if defaults := class.Spec.Config; defaults != nil {
+		if result.Instances == nil {
+			result.Instances = defaults.Instances
+		}
+		if result.PostgresVersion == nil {
+			result.PostgresVersion = defaults.PostgresVersion
+		}
+		if result.Resources == nil {
+			result.Resources = defaults.Resources
+		}
+		if result.Storage == nil {
+			result.Storage = defaults.Storage
+		}
+		// Collections treat emptiness (len == 0), not just nil, as "unset",
+		// so an explicitly empty cluster value still inherits the class default.
+		if len(result.PostgreSQLConfig) == 0 {
+			result.PostgreSQLConfig = defaults.PostgreSQLConfig
+		}
+		if len(result.PgHBA) == 0 {
+			result.PgHBA = defaults.PgHBA
+		}
+	}
+
+	// After merging, the required scalars must be resolvable from either the
+	// cluster spec or the class defaults.
+	if result.Instances == nil || result.PostgresVersion == nil || result.Storage == nil {
+		return nil, fmt.Errorf("invalid configuration for class %s: instances, postgresVersion and storage are required", class.Name)
+	}
+	// Normalize remaining optional fields to non-nil zero values so downstream
+	// code can use them without further nil checks.
+	if result.PostgreSQLConfig == nil {
+		result.PostgreSQLConfig = make(map[string]string)
+	}
+	if result.PgHBA == nil {
+		result.PgHBA = make([]string, 0)
+	}
+	if result.Resources == nil {
+		result.Resources = &corev1.ResourceRequirements{}
+	}
+
+	return &MergedConfig{Spec: result, CNPG: class.Spec.CNPG}, nil
+}
+
+// buildCNPGClusterSpec builds the desired CNPG ClusterSpec.
+// IMPORTANT: any field added here must also appear in normalizeCNPGClusterSpec,
+// otherwise spec drift will be silently ignored.
+func buildCNPGClusterSpec(cfg *MergedConfig, secretName string) cnpgv1.ClusterSpec { + return cnpgv1.ClusterSpec{ + ImageName: fmt.Sprintf("ghcr.io/cloudnative-pg/postgresql:%s", *cfg.Spec.PostgresVersion), + Instances: int(*cfg.Spec.Instances), + PostgresConfiguration: cnpgv1.PostgresConfiguration{ + Parameters: cfg.Spec.PostgreSQLConfig, + PgHBA: cfg.Spec.PgHBA, + }, + SuperuserSecret: &cnpgv1.LocalObjectReference{Name: secretName}, + EnableSuperuserAccess: ptr.To(true), + Bootstrap: &cnpgv1.BootstrapConfiguration{ + InitDB: &cnpgv1.BootstrapInitDB{ + Database: defaultDatabaseName, + Owner: superUsername, + Secret: &cnpgv1.LocalObjectReference{Name: secretName}, + }, + }, + StorageConfiguration: cnpgv1.StorageConfiguration{ + Size: cfg.Spec.Storage.String(), + }, + Resources: *cfg.Spec.Resources, + } +} + +func buildCNPGCluster(scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, cfg *MergedConfig, secretName string) *cnpgv1.Cluster { + cnpg := &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: cluster.Name, Namespace: cluster.Namespace}, + Spec: buildCNPGClusterSpec(cfg, secretName), + } + ctrl.SetControllerReference(cluster, cnpg, scheme) + return cnpg +} + +func normalizeCNPGClusterSpec(spec cnpgv1.ClusterSpec, customDefinedParameters map[string]string) normalizedCNPGClusterSpec { + normalized := normalizedCNPGClusterSpec{ + ImageName: spec.ImageName, + Instances: spec.Instances, + StorageSize: spec.StorageConfiguration.Size, + Resources: spec.Resources, + } + if len(customDefinedParameters) > 0 { + normalized.CustomDefinedParameters = make(map[string]string) + for k := range customDefinedParameters { + normalized.CustomDefinedParameters[k] = spec.PostgresConfiguration.Parameters[k] + } + } + if len(spec.PostgresConfiguration.PgHBA) > 0 { + normalized.PgHBA = spec.PostgresConfiguration.PgHBA + } + if spec.Bootstrap != nil && spec.Bootstrap.InitDB != nil { + normalized.DefaultDatabase = spec.Bootstrap.InitDB.Database + normalized.Owner = 
spec.Bootstrap.InitDB.Owner + } + return normalized +} + +// reconcileManagedRoles synchronizes ManagedRoles from PostgresCluster spec to CNPG Cluster managed.roles. +func reconcileManagedRoles(ctx context.Context, c client.Client, cluster *enterprisev4.PostgresCluster, cnpgCluster *cnpgv1.Cluster) error { + logger := log.FromContext(ctx) + + if len(cluster.Spec.ManagedRoles) == 0 { + logger.Info("No managed roles to reconcile") + return nil + } + + desiredRoles := make([]cnpgv1.RoleConfiguration, 0, len(cluster.Spec.ManagedRoles)) + for _, role := range cluster.Spec.ManagedRoles { + r := cnpgv1.RoleConfiguration{ + Name: role.Name, + Ensure: cnpgv1.EnsureAbsent, + } + // Exists bool replaces the old Ensure string enum ("present"/"absent"). + if role.Exists { + r.Ensure = cnpgv1.EnsurePresent + r.Login = true + } + if role.PasswordSecretRef != nil { + // Pass only the secret name to CNPG — CNPG always reads the "password" key. + r.PasswordSecret = &cnpgv1.LocalObjectReference{Name: role.PasswordSecretRef.LocalObjectReference.Name} + } + desiredRoles = append(desiredRoles, r) + } + + var currentRoles []cnpgv1.RoleConfiguration + if cnpgCluster.Spec.Managed != nil { + currentRoles = cnpgCluster.Spec.Managed.Roles + } + + if equality.Semantic.DeepEqual(currentRoles, desiredRoles) { + logger.Info("CNPG Cluster roles already match desired state, no update needed") + return nil + } + + logger.Info("CNPG Cluster roles differ from desired state, updating", + "currentCount", len(currentRoles), "desiredCount", len(desiredRoles)) + + originalCluster := cnpgCluster.DeepCopy() + if cnpgCluster.Spec.Managed == nil { + cnpgCluster.Spec.Managed = &cnpgv1.ManagedConfiguration{} + } + cnpgCluster.Spec.Managed.Roles = desiredRoles + + if err := c.Patch(ctx, cnpgCluster, client.MergeFrom(originalCluster)); err != nil { + return fmt.Errorf("failed to patch CNPG Cluster with managed roles: %w", err) + } + logger.Info("Successfully updated CNPG Cluster with managed roles", "roleCount", 
len(desiredRoles)) + return nil +} + +func poolerResourceName(clusterName, poolerType string) string { + return fmt.Sprintf("%s%s%s", clusterName, defaultPoolerSuffix, poolerType) +} + +func poolerExists(ctx context.Context, c client.Client, cluster *enterprisev4.PostgresCluster, poolerType string) bool { + pooler := &cnpgv1.Pooler{} + err := c.Get(ctx, types.NamespacedName{ + Name: poolerResourceName(cluster.Name, poolerType), + Namespace: cluster.Namespace, + }, pooler) + if apierrors.IsNotFound(err) { + return false + } + if err != nil { + log.FromContext(ctx).Error(err, "Failed to check pooler existence", "type", poolerType) + return false + } + return true +} + +func arePoolersReady(rwPooler, roPooler *cnpgv1.Pooler) bool { + return isPoolerReady(rwPooler) && isPoolerReady(roPooler) +} + +// isPoolerReady checks if a pooler has all instances scheduled. +// CNPG PoolerStatus only tracks scheduled instances, not ready pods. +func isPoolerReady(pooler *cnpgv1.Pooler) bool { + desired := int32(1) + if pooler.Spec.Instances != nil { + desired = *pooler.Spec.Instances + } + return pooler.Status.Instances >= desired +} + +func poolerInstanceCount(p *cnpgv1.Pooler) (desired, scheduled int32) { + desired = 1 + if p.Spec.Instances != nil { + desired = *p.Spec.Instances + } + return desired, p.Status.Instances +} + +// createOrUpdateConnectionPoolers creates RW and RO poolers if they don't exist. 
+func createOrUpdateConnectionPoolers(ctx context.Context, c client.Client, scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, cfg *MergedConfig, cnpgCluster *cnpgv1.Cluster) error {
+	if err := createConnectionPooler(ctx, c, scheme, cluster, cfg, cnpgCluster, readWriteEndpoint); err != nil {
+		return fmt.Errorf("failed to reconcile RW pooler: %w", err)
+	}
+	if err := createConnectionPooler(ctx, c, scheme, cluster, cfg, cnpgCluster, readOnlyEndpoint); err != nil {
+		return fmt.Errorf("failed to reconcile RO pooler: %w", err)
+	}
+	return nil
+}
+
+// createConnectionPooler creates a single pooler of the given type if it does
+// not exist yet; an existing pooler is left untouched (create-only semantics).
+func createConnectionPooler(ctx context.Context, c client.Client, scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, cfg *MergedConfig, cnpgCluster *cnpgv1.Cluster, poolerType string) error {
+	poolerName := poolerResourceName(cluster.Name, poolerType)
+	existing := &cnpgv1.Pooler{}
+	err := c.Get(ctx, types.NamespacedName{Name: poolerName, Namespace: cluster.Namespace}, existing)
+	if err == nil {
+		return nil // already exists
+	}
+	if !apierrors.IsNotFound(err) {
+		return err
+	}
+	log.FromContext(ctx).Info("Creating CNPG Pooler", "name", poolerName, "type", poolerType)
+	return c.Create(ctx, buildCNPGPooler(scheme, cluster, cfg, cnpgCluster, poolerType))
+}
+
+// buildCNPGPooler assembles a PgBouncer Pooler of the given type attached to
+// the CNPG cluster. Callers guarantee cfg.CNPG.ConnectionPooler is non-nil,
+// but its optional fields fall back to defaults instead of being dereferenced
+// blindly (a nil Instances or Mode would otherwise panic).
+func buildCNPGPooler(scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, cfg *MergedConfig, cnpgCluster *cnpgv1.Cluster, poolerType string) *cnpgv1.Pooler {
+	pc := cfg.CNPG.ConnectionPooler
+	instances := int32(1)
+	if pc.Instances != nil {
+		instances = *pc.Instances
+	}
+	// "session" is pgbouncer's default pool mode.
+	mode := cnpgv1.PgBouncerPoolModeSession
+	if pc.Mode != nil {
+		mode = cnpgv1.PgBouncerPoolMode(*pc.Mode)
+	}
+	pooler := &cnpgv1.Pooler{
+		ObjectMeta: metav1.ObjectMeta{Name: poolerResourceName(cluster.Name, poolerType), Namespace: cluster.Namespace},
+		Spec: cnpgv1.PoolerSpec{
+			Cluster:   cnpgv1.LocalObjectReference{Name: cnpgCluster.Name},
+			Instances: &instances,
+			Type:      cnpgv1.PoolerType(poolerType),
+			PgBouncer: &cnpgv1.PgBouncerSpec{
+				PoolMode:   mode,
+				Parameters: pc.Config,
+			},
+		},
+	}
+	// Error deliberately discarded: owner and pooler are freshly constructed
+	// in the same namespace, so setting the reference cannot fail in practice.
+	_ = ctrl.SetControllerReference(cluster, pooler, scheme)
+	return pooler
+}
+
+// deleteConnectionPoolers removes RW and RO poolers if they exist.
+func deleteConnectionPoolers(ctx context.Context, c client.Client, cluster *enterprisev4.PostgresCluster) error {
+	logger := log.FromContext(ctx)
+	for _, poolerType := range []string{readWriteEndpoint, readOnlyEndpoint} {
+		poolerName := poolerResourceName(cluster.Name, poolerType)
+		// Single Get per pooler: a NotFound means nothing to delete; any other
+		// lookup error is surfaced so the reconcile/finalizer retries instead
+		// of silently skipping the deletion.
+		pooler := &cnpgv1.Pooler{}
+		if err := c.Get(ctx, types.NamespacedName{Name: poolerName, Namespace: cluster.Namespace}, pooler); err != nil {
+			if apierrors.IsNotFound(err) {
+				continue
+			}
+			return fmt.Errorf("failed to get pooler %s: %w", poolerName, err)
+		}
+		logger.Info("Deleting CNPG Pooler", "name", poolerName)
+		if err := c.Delete(ctx, pooler); err != nil && !apierrors.IsNotFound(err) {
+			return fmt.Errorf("failed to delete pooler %s: %w", poolerName, err)
+		}
+	}
+	return nil
+}
+
+// syncPoolerStatus populates ConnectionPoolerStatus and the PoolerReady condition.
+func syncPoolerStatus(ctx context.Context, c client.Client, cluster *enterprisev4.PostgresCluster) error {
+	rwPooler := &cnpgv1.Pooler{}
+	if err := c.Get(ctx, types.NamespacedName{
+		Name:      poolerResourceName(cluster.Name, readWriteEndpoint),
+		Namespace: cluster.Namespace,
+	}, rwPooler); err != nil {
+		return err
+	}
+
+	roPooler := &cnpgv1.Pooler{}
+	if err := c.Get(ctx, types.NamespacedName{
+		Name:      poolerResourceName(cluster.Name, readOnlyEndpoint),
+		Namespace: cluster.Namespace,
+	}, roPooler); err != nil {
+		return err
+	}
+
+	cluster.Status.ConnectionPoolerStatus = &enterprisev4.ConnectionPoolerStatus{Enabled: true}
+	rwDesired, rwScheduled := poolerInstanceCount(rwPooler)
+	roDesired, roScheduled := poolerInstanceCount(roPooler)
+
+	return setStatus(ctx, c, cluster, poolerReady, metav1.ConditionTrue, reasonAllInstancesReady,
+		fmt.Sprintf("%s: %d/%d, %s: %d/%d", readWriteEndpoint, rwScheduled, rwDesired, readOnlyEndpoint, roScheduled, roDesired),
+		readyClusterPhase)
+}
+
+// syncStatus maps CNPG Cluster state to PostgresCluster status.
+func syncStatus(ctx context.Context, c client.Client, cluster *enterprisev4.PostgresCluster, cnpgCluster *cnpgv1.Cluster) error {
+	// Record which provisioner object backs this cluster.
+	cluster.Status.ProvisionerRef = &corev1.ObjectReference{
+		APIVersion: "postgresql.cnpg.io/v1",
+		Kind:       "Cluster",
+		Namespace:  cnpgCluster.Namespace,
+		Name:       cnpgCluster.Name,
+		UID:        cnpgCluster.UID,
+	}
+
+	var phase reconcileClusterPhases
+	var condStatus metav1.ConditionStatus
+	var reason conditionReasons
+	var message string
+
+	// Bucket each known CNPG phase into our phase/condition/reason triple.
+	switch cnpgCluster.Status.Phase {
+	case cnpgv1.PhaseHealthy:
+		phase, condStatus, reason, message = readyClusterPhase, metav1.ConditionTrue, reasonCNPGClusterHealthy, "Cluster is up and running"
+	// Transitional phases: initial provisioning.
+	case cnpgv1.PhaseFirstPrimary, cnpgv1.PhaseCreatingReplica, cnpgv1.PhaseWaitingForInstancesToBeActive:
+		phase, condStatus, reason = provisioningClusterPhase, metav1.ConditionFalse, reasonCNPGProvisioning
+		message = fmt.Sprintf("CNPG cluster provisioning: %s", cnpgCluster.Status.Phase)
+	// Transitional phases: reconfiguration of an existing cluster.
+	case cnpgv1.PhaseSwitchover:
+		phase, condStatus, reason, message = configuringClusterPhase, metav1.ConditionFalse, reasonCNPGSwitchover, "Cluster changing primary node"
+	case cnpgv1.PhaseFailOver:
+		phase, condStatus, reason, message = configuringClusterPhase, metav1.ConditionFalse, reasonCNPGFailingOver, "Pod missing, need to change primary"
+	case cnpgv1.PhaseInplacePrimaryRestart, cnpgv1.PhaseInplaceDeletePrimaryRestart:
+		phase, condStatus, reason = configuringClusterPhase, metav1.ConditionFalse, reasonCNPGRestarting
+		message = fmt.Sprintf("CNPG cluster restarting: %s", cnpgCluster.Status.Phase)
+	case cnpgv1.PhaseUpgrade, cnpgv1.PhaseMajorUpgrade, cnpgv1.PhaseUpgradeDelayed, cnpgv1.PhaseOnlineUpgrading:
+		phase, condStatus, reason = configuringClusterPhase, metav1.ConditionFalse, reasonCNPGUpgrading
+		message = fmt.Sprintf("CNPG cluster upgrading: %s", cnpgCluster.Status.Phase)
+	case cnpgv1.PhaseApplyingConfiguration:
+		phase, condStatus, reason, message = configuringClusterPhase, metav1.ConditionFalse, reasonCNPGApplyingConfig, "Configuration change is being applied"
+	case cnpgv1.PhaseReplicaClusterPromotion:
+		phase, condStatus, reason, message = configuringClusterPhase, metav1.ConditionFalse, reasonCNPGPromoting, "Replica is being promoted to primary"
+	// Failed phases: manual intervention is (or may be) required.
+	case cnpgv1.PhaseWaitingForUser:
+		phase, condStatus, reason, message = failedClusterPhase, metav1.ConditionFalse, reasonCNPGWaitingForUser, "Action from the user is required"
+	case cnpgv1.PhaseUnrecoverable:
+		phase, condStatus, reason, message = failedClusterPhase, metav1.ConditionFalse, reasonCNPGUnrecoverable, "Cluster failed, needs manual intervention"
+	case cnpgv1.PhaseCannotCreateClusterObjects:
+		phase, condStatus, reason, message = failedClusterPhase, metav1.ConditionFalse, reasonCNPGProvisioningFailed, "Cluster resources cannot be created"
+	case cnpgv1.PhaseUnknownPlugin, cnpgv1.PhaseFailurePlugin:
+		phase, condStatus, reason = failedClusterPhase, metav1.ConditionFalse, reasonCNPGPluginError
+		message = fmt.Sprintf("CNPG plugin error: %s", cnpgCluster.Status.Phase)
+	case cnpgv1.PhaseImageCatalogError, cnpgv1.PhaseArchitectureBinaryMissing:
+		phase, condStatus, reason = failedClusterPhase, metav1.ConditionFalse, reasonCNPGImageError
+		message = fmt.Sprintf("CNPG image error: %s", cnpgCluster.Status.Phase)
+	// Empty phase: CNPG has not reported any status yet.
+	case "":
+		phase, condStatus, reason, message = pendingClusterPhase, metav1.ConditionFalse, reasonCNPGProvisioning, "CNPG cluster is pending creation"
+	// Unknown phases are treated as still provisioning rather than failed.
+	default:
+		phase, condStatus, reason = provisioningClusterPhase, metav1.ConditionFalse, reasonCNPGProvisioning
+		message = fmt.Sprintf("CNPG cluster phase: %s", cnpgCluster.Status.Phase)
+	}
+
+	return setStatus(ctx, c, cluster, clusterReady, condStatus, reason, message, phase)
+}
+
+// setStatus sets the phase, condition and persists the status.
+func setStatus(ctx context.Context, c client.Client, cluster *enterprisev4.PostgresCluster, condType conditionTypes, status metav1.ConditionStatus, reason conditionReasons, message string, phase reconcileClusterPhases) error {
+	p := string(phase)
+	cluster.Status.Phase = &p
+	// meta.SetStatusCondition updates-or-appends and bumps LastTransitionTime
+	// only when the condition status actually changes.
+	meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
+		Type:               string(condType),
+		Status:             status,
+		Reason:             string(reason),
+		Message:            message,
+		ObservedGeneration: cluster.Generation,
+	})
+	if err := c.Status().Update(ctx, cluster); err != nil {
+		return fmt.Errorf("failed to update PostgresCluster status: %w", err)
+	}
+	return nil
+}
+
+// generateConfigMap builds a ConfigMap with connection details for the PostgresCluster.
+func generateConfigMap(ctx context.Context, c client.Client, scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, cnpgCluster *cnpgv1.Cluster, secretName string) (*corev1.ConfigMap, error) {
+	// Reuse the name already recorded in status so the ConfigMap stays stable
+	// across reconciles; otherwise derive it from the cluster name.
+	cmName := fmt.Sprintf("%s%s", cluster.Name, defaultConfigMapSuffix)
+	if cluster.Status.Resources != nil && cluster.Status.Resources.ConfigMapRef != nil {
+		cmName = cluster.Status.Resources.ConfigMapRef.Name
+	}
+
+	// NOTE(review): endpoint values assume CNPG's "<cluster>-rw/-ro/-r"
+	// Service naming and same-cluster DNS resolution (no ".svc" suffix) —
+	// confirm consumers resolve these names correctly.
+	data := map[string]string{
+		"CLUSTER_RW_ENDPOINT":   fmt.Sprintf("%s-rw.%s", cnpgCluster.Name, cnpgCluster.Namespace),
+		"CLUSTER_RO_ENDPOINT":   fmt.Sprintf("%s-ro.%s", cnpgCluster.Name, cnpgCluster.Namespace),
+		"CLUSTER_R_ENDPOINT":    fmt.Sprintf("%s-r.%s", cnpgCluster.Name, cnpgCluster.Namespace),
+		"DEFAULT_CLUSTER_PORT":  defaultPort,
+		"SUPER_USER_NAME":       superUsername,
+		"SUPER_USER_SECRET_REF": secretName,
+	}
+	// Pooler endpoints are published only when both poolers exist.
+	if poolerExists(ctx, c, cluster, readWriteEndpoint) && poolerExists(ctx, c, cluster, readOnlyEndpoint) {
+		data["CLUSTER_POOLER_RW_ENDPOINT"] = fmt.Sprintf("%s.%s", poolerResourceName(cnpgCluster.Name, readWriteEndpoint), cnpgCluster.Namespace)
+		data["CLUSTER_POOLER_RO_ENDPOINT"] = fmt.Sprintf("%s.%s", poolerResourceName(cnpgCluster.Name, readOnlyEndpoint), cnpgCluster.Namespace)
+	}
+
+	cm := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      cmName,
+			Namespace: cluster.Namespace,
+			Labels:    map[string]string{"app.kubernetes.io/managed-by": "postgrescluster-controller"},
+		},
+		Data: data,
+	}
+	if err := ctrl.SetControllerReference(cluster, cm, scheme); err != nil {
+		return nil, fmt.Errorf("failed to set controller reference: %w", err)
+	}
+	return cm, nil
+}
+
+// ensureClusterSecret creates the superuser secret if it doesn't exist and persists the ref to status.
+func ensureClusterSecret(ctx context.Context, c client.Client, scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, secretName string, secret *corev1.Secret) error {
+	err := c.Get(ctx, types.NamespacedName{Name: secretName, Namespace: cluster.Namespace}, secret)
+	if err != nil && !apierrors.IsNotFound(err) {
+		return err
+	}
+	if apierrors.IsNotFound(err) {
+		pw, err := generatePassword()
+		if err != nil {
+			return err
+		}
+		// NOTE(review): on this create path the caller's `secret` out-param is
+		// left as the zero value (only newSecret is populated) — confirm
+		// callers re-fetch before reading it.
+		newSecret := &corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{Name: secretName, Namespace: cluster.Namespace},
+			StringData: map[string]string{"username": superUsername, "password": pw},
+			Type:       corev1.SecretTypeOpaque,
+		}
+		if err := ctrl.SetControllerReference(cluster, newSecret, scheme); err != nil {
+			return err
+		}
+		if err := c.Create(ctx, newSecret); err != nil {
+			return err
+		}
+	}
+	if cluster.Status.Resources == nil {
+		cluster.Status.Resources = &enterprisev4.PostgresClusterResources{}
+	}
+	cluster.Status.Resources.SuperUserSecretRef = &corev1.SecretKeySelector{
+		LocalObjectReference: corev1.LocalObjectReference{Name: secretName},
+		Key:                  secretKeyPassword,
+	}
+	return nil
+}
+
+// clusterSecretExists reports whether the named secret exists, loading it into
+// `secret` on success; NotFound is not an error.
+func clusterSecretExists(ctx context.Context, c client.Client, namespace, name string, secret *corev1.Secret) (bool, error) {
+	err := c.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, secret)
+	if apierrors.IsNotFound(err) {
+		return false, nil
+	}
+	return err == nil, err
+}
+
+// deleteCNPGCluster deletes the CNPG Cluster if it exists.
+func deleteCNPGCluster(ctx context.Context, c client.Client, cnpgCluster *cnpgv1.Cluster) error {
+	logger := log.FromContext(ctx)
+	if cnpgCluster == nil {
+		logger.Info("CNPG Cluster not found, skipping deletion")
+		return nil
+	}
+	logger.Info("Deleting CNPG Cluster", "name", cnpgCluster.Name)
+	// NotFound is tolerated: the cluster may already have been removed.
+	if err := c.Delete(ctx, cnpgCluster); err != nil && !apierrors.IsNotFound(err) {
+		return fmt.Errorf("failed to delete CNPG Cluster: %w", err)
+	}
+	return nil
+}
+
+// handleFinalizer processes deletion cleanup: removes poolers, then deletes or orphans the CNPG Cluster
+// based on ClusterDeletionPolicy, then removes the finalizer.
+func handleFinalizer(ctx context.Context, c client.Client, scheme *runtime.Scheme, cluster *enterprisev4.PostgresCluster, secret *corev1.Secret) error {
+	logger := log.FromContext(ctx)
+	if cluster.GetDeletionTimestamp() == nil {
+		logger.Info("PostgresCluster not marked for deletion, skipping finalizer logic")
+		return nil
+	}
+	if !controllerutil.ContainsFinalizer(cluster, PostgresClusterFinalizerName) {
+		logger.Info("Finalizer not present on PostgresCluster, skipping finalizer logic")
+		return nil
+	}
+
+	// A nil cnpgCluster below signals it was already deleted externally.
+	cnpgCluster := &cnpgv1.Cluster{}
+	err := c.Get(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, cnpgCluster)
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			cnpgCluster = nil
+			logger.Info("CNPG cluster not found during cleanup")
+		} else {
+			return fmt.Errorf("failed to fetch CNPG cluster during cleanup: %w", err)
+		}
+	}
+	logger.Info("Processing finalizer cleanup for PostgresCluster")
+
+	// Poolers are always deleted, regardless of the deletion policy.
+	if err := deleteConnectionPoolers(ctx, c, cluster); err != nil {
+		logger.Error(err, "Failed to delete connection poolers during cleanup")
+		return fmt.Errorf("failed to delete connection poolers: %w", err)
+	}
+
+	// Dereference *string — empty string falls through to default (unknown policy).
+ policy := "" + if cluster.Spec.ClusterDeletionPolicy != nil { + policy = *cluster.Spec.ClusterDeletionPolicy + } + + switch policy { + case clusterDeletionPolicyDelete: + logger.Info("ClusterDeletionPolicy is 'Delete', deleting CNPG Cluster and associated resources") + if cnpgCluster != nil { + if err := deleteCNPGCluster(ctx, c, cnpgCluster); err != nil { + logger.Error(err, "Failed to delete CNPG Cluster during finalizer cleanup") + return fmt.Errorf("failed to delete CNPG Cluster during finalizer cleanup: %w", err) + } + } else { + logger.Info("CNPG Cluster not found, skipping deletion") + } + + case clusterDeletionPolicyRetain: + logger.Info("ClusterDeletionPolicy is 'Retain', removing owner references to orphan CNPG Cluster") + if cnpgCluster != nil { + originalCNPG := cnpgCluster.DeepCopy() + refRemoved, err := removeOwnerRef(scheme, cluster, cnpgCluster) + if err != nil { + return fmt.Errorf("failed to remove owner reference from CNPG cluster: %w", err) + } + if !refRemoved { + logger.Info("Owner reference already removed from CNPG Cluster, skipping patch") + } + if err := patchObject(ctx, c, originalCNPG, cnpgCluster, "CNPGCluster"); err != nil { + return fmt.Errorf("failed to patch CNPG cluster after removing owner reference: %w", err) + } + logger.Info("Removed owner reference from CNPG Cluster") + } + + // Remove owner reference from the superuser Secret to prevent cascading deletion. 
+ if cluster.Status.Resources != nil && cluster.Status.Resources.SuperUserSecretRef != nil { + secretName := cluster.Status.Resources.SuperUserSecretRef.Name + if err := c.Get(ctx, types.NamespacedName{Name: secretName, Namespace: cluster.Namespace}, secret); err != nil { + if !apierrors.IsNotFound(err) { + logger.Error(err, "Failed to fetch Secret during cleanup") + return fmt.Errorf("failed to fetch secret during cleanup: %w", err) + } + logger.Info("Secret not found, skipping owner reference removal", "secret", secretName) + } else { + originalSecret := secret.DeepCopy() + refRemoved, err := removeOwnerRef(scheme, cluster, secret) + if err != nil { + return fmt.Errorf("failed to remove owner reference from Secret: %w", err) + } + if refRemoved { + if err := patchObject(ctx, c, originalSecret, secret, "Secret"); err != nil { + return fmt.Errorf("failed to patch Secret after removing owner reference: %w", err) + } + } + logger.Info("Removed owner reference from Secret") + } + } + + default: + logger.Info("Unknown ClusterDeletionPolicy", "policy", policy) + } + + controllerutil.RemoveFinalizer(cluster, PostgresClusterFinalizerName) + if err := c.Update(ctx, cluster); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("PostgresCluster already deleted, skipping finalizer update") + return nil + } + logger.Error(err, "Failed to remove finalizer from PostgresCluster") + return fmt.Errorf("failed to remove finalizer: %w", err) + } + logger.Info("Finalizer removed, cleanup complete") + return nil +} + +func removeOwnerRef(scheme *runtime.Scheme, owner, obj client.Object) (bool, error) { + hasRef, err := controllerutil.HasOwnerReference(obj.GetOwnerReferences(), owner, scheme) + if err != nil { + return false, fmt.Errorf("failed to check owner reference: %w", err) + } + if !hasRef { + return false, nil + } + if err := controllerutil.RemoveOwnerReference(owner, obj, scheme); err != nil { + return false, fmt.Errorf("failed to remove owner reference: %w", err) + } + 
return true, nil +} + +// patchObject patches obj from original; treats NotFound as a no-op. +func patchObject(ctx context.Context, c client.Client, original, obj client.Object, kind objectKind) error { + logger := log.FromContext(ctx) + if err := c.Patch(ctx, obj, client.MergeFrom(original)); err != nil { + if apierrors.IsNotFound(err) { + logger.Info("Object not found, skipping patch", "kind", kind, "name", obj.GetName()) + return nil + } + return fmt.Errorf("failed to patch %s object: %w", kind, err) + } + logger.Info("Patched object successfully", "kind", kind, "name", obj.GetName()) + return nil +} + +func generatePassword() (string, error) { + const ( + length = 32 + digits = 8 + symbols = 0 + ) + return password.Generate(length, digits, symbols, false, true) +} diff --git a/pkg/postgresql/cluster/core/cluster_unit_test.go b/pkg/postgresql/cluster/core/cluster_unit_test.go new file mode 100644 index 000000000..e87173afb --- /dev/null +++ b/pkg/postgresql/cluster/core/cluster_unit_test.go @@ -0,0 +1,1139 @@ +package core + +import ( + "context" + "testing" + + cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" + enterprisev4 "github.com/splunk/splunk-operator/api/v4" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + client "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestPoolerResourceName(t *testing.T) { + tests := []struct { + name string + clusterName string + poolerType string + expected string + }{ + { + name: "read-write pooler", + clusterName: "my-cluster", + poolerType: "rw", + expected: "my-cluster-pooler-rw", + }, + { + name: "cluster name with mixed case and alphanumeric suffix", + clusterName: "My-Cluster-12x2f", + poolerType: 
"rw", + expected: "My-Cluster-12x2f-pooler-rw", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := poolerResourceName(tt.clusterName, tt.poolerType) + + assert.Equal(t, tt.expected, got) + }) + } +} + +func TestIsPoolerReady(t *testing.T) { + tests := []struct { + name string + pooler *cnpgv1.Pooler + expected bool + }{ + { + name: "nil instances defaults desired to 1, zero scheduled means not ready", + pooler: &cnpgv1.Pooler{ + Status: cnpgv1.PoolerStatus{Instances: 0}, + }, + expected: false, + }, + { + name: "nil instances defaults desired to 1, one scheduled means ready", + pooler: &cnpgv1.Pooler{ + Status: cnpgv1.PoolerStatus{Instances: 1}, + }, + expected: true, + }, + { + name: "scheduled meets desired", + pooler: &cnpgv1.Pooler{ + Spec: cnpgv1.PoolerSpec{Instances: ptr.To(int32(3))}, + Status: cnpgv1.PoolerStatus{Instances: 3}, + }, + expected: true, + }, + { + name: "scheduled below desired", + pooler: &cnpgv1.Pooler{ + Spec: cnpgv1.PoolerSpec{Instances: ptr.To(int32(3))}, + Status: cnpgv1.PoolerStatus{Instances: 2}, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := isPoolerReady(tt.pooler) + + assert.Equal(t, tt.expected, got) + }) + } +} + +func TestNormalizeCNPGClusterSpec(t *testing.T) { + tests := []struct { + name string + spec cnpgv1.ClusterSpec + customDefinedParameters map[string]string + expected normalizedCNPGClusterSpec + }{ + { + name: "basic fields are copied", + spec: cnpgv1.ClusterSpec{ + ImageName: "ghcr.io/cloudnative-pg/postgresql:18", + Instances: 3, + StorageConfiguration: cnpgv1.StorageConfiguration{Size: "10Gi"}, + }, + customDefinedParameters: nil, + expected: normalizedCNPGClusterSpec{ + ImageName: "ghcr.io/cloudnative-pg/postgresql:18", + Instances: 3, + StorageSize: "10Gi", + }, + }, + { + name: "CNPG-injected parameters are excluded from comparison", + spec: cnpgv1.ClusterSpec{ + ImageName: "img:18", + Instances: 1, + 
PostgresConfiguration: cnpgv1.PostgresConfiguration{ + Parameters: map[string]string{ + "shared_buffers": "256MB", + "max_connections": "200", + "cnpg_injected": "should-not-appear", + }, + }, + }, + customDefinedParameters: map[string]string{ + "shared_buffers": "256MB", + "max_connections": "200", + }, + expected: normalizedCNPGClusterSpec{ + ImageName: "img:18", + Instances: 1, + CustomDefinedParameters: map[string]string{ + "shared_buffers": "256MB", + "max_connections": "200", + }, + }, + }, + { + name: "empty custom params does not populate CustomDefinedParameters", + spec: cnpgv1.ClusterSpec{ + ImageName: "img:18", + Instances: 1, + PostgresConfiguration: cnpgv1.PostgresConfiguration{ + Parameters: map[string]string{"cnpg_injected": "val"}, + }, + }, + customDefinedParameters: map[string]string{}, + expected: normalizedCNPGClusterSpec{ + ImageName: "img:18", + Instances: 1, + }, + }, + { + name: "PgHBA included when non-empty", + spec: cnpgv1.ClusterSpec{ + ImageName: "img:18", + Instances: 1, + PostgresConfiguration: cnpgv1.PostgresConfiguration{ + PgHBA: []string{"hostssl all all 0.0.0.0/0 scram-sha-256"}, + }, + }, + expected: normalizedCNPGClusterSpec{ + ImageName: "img:18", + Instances: 1, + PgHBA: []string{"hostssl all all 0.0.0.0/0 scram-sha-256"}, + }, + }, + { + name: "empty PgHBA is excluded", + spec: cnpgv1.ClusterSpec{ + ImageName: "img:18", + Instances: 1, + PostgresConfiguration: cnpgv1.PostgresConfiguration{ + PgHBA: []string{}, + }, + }, + expected: normalizedCNPGClusterSpec{ + ImageName: "img:18", + Instances: 1, + }, + }, + { + name: "bootstrap populates database and owner", + spec: cnpgv1.ClusterSpec{ + ImageName: "img:18", + Instances: 1, + Bootstrap: &cnpgv1.BootstrapConfiguration{ + InitDB: &cnpgv1.BootstrapInitDB{ + Database: "mydb", + Owner: "admin", + }, + }, + }, + expected: normalizedCNPGClusterSpec{ + ImageName: "img:18", + Instances: 1, + DefaultDatabase: "mydb", + Owner: "admin", + }, + }, + { + name: "nil bootstrap leaves 
database and owner empty", + spec: cnpgv1.ClusterSpec{ + ImageName: "img:18", + Instances: 1, + }, + expected: normalizedCNPGClusterSpec{ + ImageName: "img:18", + Instances: 1, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := normalizeCNPGClusterSpec(tt.spec, tt.customDefinedParameters) + + assert.Equal(t, tt.expected, got) + }) + } +} + +func TestGetMergedConfig(t *testing.T) { + classInstances := int32(1) + classVersion := "17" + classStorage := resource.MustParse("50Gi") + baseClass := &enterprisev4.PostgresClusterClass{ + ObjectMeta: metav1.ObjectMeta{Name: "standard"}, + Spec: enterprisev4.PostgresClusterClassSpec{ + Config: &enterprisev4.PostgresClusterClassConfig{ + Instances: &classInstances, + PostgresVersion: &classVersion, + Storage: &classStorage, + Resources: &corev1.ResourceRequirements{}, + PostgreSQLConfig: map[string]string{"shared_buffers": "128MB"}, + PgHBA: []string{"host all all 0.0.0.0/0 md5"}, + }, + CNPG: &enterprisev4.CNPGConfig{PrimaryUpdateMethod: ptr.To("switchover")}, + }, + } + + t.Run("cluster spec overrides class defaults", func(t *testing.T) { + overrideInstances := int32(5) + overrideVersion := "18" + overrideStorage := resource.MustParse("100Gi") + cluster := &enterprisev4.PostgresCluster{ + Spec: enterprisev4.PostgresClusterSpec{ + Instances: &overrideInstances, + PostgresVersion: &overrideVersion, + Storage: &overrideStorage, + PostgreSQLConfig: map[string]string{"max_connections": "200"}, + PgHBA: []string{"hostssl all all 0.0.0.0/0 scram-sha-256"}, + }, + } + + cfg, err := getMergedConfig(baseClass, cluster) + + require.NoError(t, err) + assert.Equal(t, int32(5), *cfg.Spec.Instances) + assert.Equal(t, "18", *cfg.Spec.PostgresVersion) + assert.Equal(t, "100Gi", cfg.Spec.Storage.String()) + assert.Equal(t, "200", cfg.Spec.PostgreSQLConfig["max_connections"]) + assert.Equal(t, "hostssl all all 0.0.0.0/0 scram-sha-256", cfg.Spec.PgHBA[0]) + }) + + t.Run("class defaults fill in nil cluster 
fields", func(t *testing.T) { + cluster := &enterprisev4.PostgresCluster{ + Spec: enterprisev4.PostgresClusterSpec{}, + } + + cfg, err := getMergedConfig(baseClass, cluster) + + require.NoError(t, err) + assert.Equal(t, int32(1), *cfg.Spec.Instances) + assert.Equal(t, "17", *cfg.Spec.PostgresVersion) + assert.Equal(t, "50Gi", cfg.Spec.Storage.String()) + assert.Equal(t, "128MB", cfg.Spec.PostgreSQLConfig["shared_buffers"]) + }) + + t.Run("returns error when required fields missing from both", func(t *testing.T) { + emptyClass := &enterprisev4.PostgresClusterClass{ + ObjectMeta: metav1.ObjectMeta{Name: "empty"}, + Spec: enterprisev4.PostgresClusterClassSpec{}, + } + cluster := &enterprisev4.PostgresCluster{ + Spec: enterprisev4.PostgresClusterSpec{}, + } + + _, err := getMergedConfig(emptyClass, cluster) + + require.Error(t, err) + }) + + t.Run("CNPG config comes from class not cluster", func(t *testing.T) { + cluster := &enterprisev4.PostgresCluster{ + Spec: enterprisev4.PostgresClusterSpec{}, + } + + cfg, err := getMergedConfig(baseClass, cluster) + + require.NoError(t, err) + require.NotNil(t, cfg.CNPG) + assert.Equal(t, "switchover", *cfg.CNPG.PrimaryUpdateMethod) + }) + + t.Run("nil maps and slices initialized to safe zero values", func(t *testing.T) { + classWithNoMaps := &enterprisev4.PostgresClusterClass{ + ObjectMeta: metav1.ObjectMeta{Name: "minimal"}, + Spec: enterprisev4.PostgresClusterClassSpec{ + Config: &enterprisev4.PostgresClusterClassConfig{ + Instances: &classInstances, + PostgresVersion: &classVersion, + Storage: &classStorage, + }, + }, + } + cluster := &enterprisev4.PostgresCluster{ + Spec: enterprisev4.PostgresClusterSpec{}, + } + + cfg, err := getMergedConfig(classWithNoMaps, cluster) + + require.NoError(t, err) + assert.NotNil(t, cfg.Spec.PostgreSQLConfig) + assert.NotNil(t, cfg.Spec.PgHBA) + assert.NotNil(t, cfg.Spec.Resources) + }) +} + +func TestBuildCNPGClusterSpec(t *testing.T) { + version := "18" + instances := int32(3) + storage := 
resource.MustParse("50Gi") + cfg := &MergedConfig{ + Spec: &enterprisev4.PostgresClusterSpec{ + PostgresVersion: &version, + Instances: &instances, + Storage: &storage, + PostgreSQLConfig: map[string]string{ + "shared_buffers": "256MB", + "max_connections": "200", + }, + PgHBA: []string{ + "hostssl all all 0.0.0.0/0 scram-sha-256", + "host replication all 10.0.0.0/8 md5", + }, + Resources: &corev1.ResourceRequirements{}, + }, + } + + spec := buildCNPGClusterSpec(cfg, "my-secret") + + assert.Equal(t, "ghcr.io/cloudnative-pg/postgresql:18", spec.ImageName) + assert.Equal(t, 3, spec.Instances) + require.NotNil(t, spec.SuperuserSecret) + assert.Equal(t, "my-secret", spec.SuperuserSecret.Name) + assert.Equal(t, "my-secret", spec.Bootstrap.InitDB.Secret.Name) + require.NotNil(t, spec.EnableSuperuserAccess) + assert.True(t, *spec.EnableSuperuserAccess) + assert.Equal(t, "postgres", spec.Bootstrap.InitDB.Database) + assert.Equal(t, "postgres", spec.Bootstrap.InitDB.Owner) + assert.Equal(t, "50Gi", spec.StorageConfiguration.Size) + assert.Equal(t, "256MB", spec.PostgresConfiguration.Parameters["shared_buffers"]) + assert.Equal(t, "200", spec.PostgresConfiguration.Parameters["max_connections"]) + require.Len(t, spec.PostgresConfiguration.PgHBA, 2) + assert.Equal(t, "hostssl all all 0.0.0.0/0 scram-sha-256", spec.PostgresConfiguration.PgHBA[0]) + assert.Equal(t, "host replication all 10.0.0.0/8 md5", spec.PostgresConfiguration.PgHBA[1]) +} + +func TestBuildCNPGPooler(t *testing.T) { + scheme := runtime.NewScheme() + enterprisev4.AddToScheme(scheme) + cnpgv1.AddToScheme(scheme) + + poolerInstances := int32(3) + poolerMode := enterprisev4.ConnectionPoolerModeTransaction + postgresCluster := &enterprisev4.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "db-ns", + UID: "test-uid", + }, + } + cnpgCluster := &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + }, + } + cfg := &MergedConfig{ + CNPG: &enterprisev4.CNPGConfig{ 
+ ConnectionPooler: &enterprisev4.ConnectionPoolerConfig{ + Instances: &poolerInstances, + Mode: &poolerMode, + Config: map[string]string{"default_pool_size": "25"}, + }, + }, + } + + t.Run("rw pooler", func(t *testing.T) { + pooler := buildCNPGPooler(scheme, postgresCluster, cfg, cnpgCluster, "rw") + + assert.Equal(t, "my-cluster-pooler-rw", pooler.Name) + assert.Equal(t, "db-ns", pooler.Namespace) + assert.Equal(t, "my-cluster", pooler.Spec.Cluster.Name) + require.NotNil(t, pooler.Spec.Instances) + assert.Equal(t, int32(3), *pooler.Spec.Instances) + assert.Equal(t, cnpgv1.PoolerType("rw"), pooler.Spec.Type) + assert.Equal(t, cnpgv1.PgBouncerPoolMode("transaction"), pooler.Spec.PgBouncer.PoolMode) + assert.Equal(t, "25", pooler.Spec.PgBouncer.Parameters["default_pool_size"]) + require.Len(t, pooler.OwnerReferences, 1) + assert.Equal(t, "test-uid", string(pooler.OwnerReferences[0].UID)) + }) + + t.Run("ro pooler", func(t *testing.T) { + pooler := buildCNPGPooler(scheme, postgresCluster, cfg, cnpgCluster, "ro") + + assert.Equal(t, "my-cluster-pooler-ro", pooler.Name) + assert.Equal(t, cnpgv1.PoolerType("ro"), pooler.Spec.Type) + }) +} + +func TestBuildCNPGCluster(t *testing.T) { + scheme := runtime.NewScheme() + enterprisev4.AddToScheme(scheme) + cnpgv1.AddToScheme(scheme) + + instances := int32(3) + version := "18" + storage := resource.MustParse("50Gi") + postgresCluster := &enterprisev4.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "db-ns", + UID: "pg-uid", + }, + } + cfg := &MergedConfig{ + Spec: &enterprisev4.PostgresClusterSpec{ + Instances: &instances, + PostgresVersion: &version, + Storage: &storage, + PostgreSQLConfig: map[string]string{}, + PgHBA: []string{}, + Resources: &corev1.ResourceRequirements{}, + }, + } + + cluster := buildCNPGCluster(scheme, postgresCluster, cfg, "my-secret") + + assert.Equal(t, "my-cluster", cluster.Name) + assert.Equal(t, "db-ns", cluster.Namespace) + require.Len(t, 
cluster.OwnerReferences, 1) + assert.Equal(t, "pg-uid", string(cluster.OwnerReferences[0].UID)) + assert.Equal(t, 3, cluster.Spec.Instances) +} + +func TestClusterSecretExists(t *testing.T) { + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + + tests := []struct { + name string + objects []client.Object + secretName string + expectedExists bool + }{ + { + name: "returns true when secret exists", + objects: []client.Object{ + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "default", + }, + }, + }, + secretName: "my-secret", + expectedExists: true, + }, + { + name: "returns false when secret not found", + objects: []client.Object{ + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "other-secret", + Namespace: "default", + }, + }, + }, + secretName: "missing-secret", + expectedExists: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objects...).Build() + secret := &corev1.Secret{} + + exists, err := clusterSecretExists(context.Background(), c, "default", tt.secretName, secret) + + require.NoError(t, err) + assert.Equal(t, tt.expectedExists, exists) + }) + } +} + +func TestRemoveOwnerRef(t *testing.T) { + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + enterprisev4.AddToScheme(scheme) + + owner := &enterprisev4.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + UID: "owner-uid", + }, + } + + otherOwnerRef := metav1.OwnerReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "other-owner", + UID: "other-uid", + } + ourOwnerRef := metav1.OwnerReference{ + APIVersion: "enterprise.splunk.com/v4", + Kind: "PostgresCluster", + Name: "my-cluster", + UID: "owner-uid", + } + + tests := []struct { + name string + ownerRefs []metav1.OwnerReference + expectedRemoved bool + expectedRefsLen int + }{ + { + name: "returns false when owner ref not present", + 
ownerRefs: nil, + expectedRemoved: false, + expectedRefsLen: 0, + }, + { + name: "removes owner ref and returns true", + ownerRefs: []metav1.OwnerReference{ourOwnerRef}, + expectedRemoved: true, + expectedRefsLen: 0, + }, + { + name: "removes only our owner ref and keeps others", + ownerRefs: []metav1.OwnerReference{otherOwnerRef, ourOwnerRef}, + expectedRemoved: true, + expectedRefsLen: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "default", + OwnerReferences: tt.ownerRefs, + }, + } + + removed, err := removeOwnerRef(scheme, owner, secret) + + require.NoError(t, err) + assert.Equal(t, tt.expectedRemoved, removed) + assert.Len(t, secret.GetOwnerReferences(), tt.expectedRefsLen) + }) + } +} + +func TestPatchObject(t *testing.T) { + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + + t.Run("patches object successfully", func(t *testing.T) { + existing := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "default", + }, + Data: map[string][]byte{"key": []byte("old-value")}, + } + c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(existing).Build() + original := existing.DeepCopy() + existing.Data["key"] = []byte("new-value") + + err := patchObject(context.Background(), c, original, existing, "Secret") + + require.NoError(t, err) + patched := &corev1.Secret{} + require.NoError(t, c.Get(context.Background(), client.ObjectKeyFromObject(existing), patched)) + assert.Equal(t, "new-value", string(patched.Data["key"])) + }) + + t.Run("returns nil when object not found", func(t *testing.T) { + c := fake.NewClientBuilder().WithScheme(scheme).Build() + original := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deleted-secret", + Namespace: "default", + }, + } + modified := original.DeepCopy() + modified.Data = map[string][]byte{"key": []byte("value")} + + err := 
patchObject(context.Background(), c, original, modified, "Secret") + + assert.NoError(t, err) + }) +} + +func TestDeleteCNPGCluster(t *testing.T) { + scheme := runtime.NewScheme() + cnpgv1.AddToScheme(scheme) + + tests := []struct { + name string + objects []client.Object + cluster *cnpgv1.Cluster + }{ + { + name: "deletes existing cluster", + objects: []client.Object{ + &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + }, + }, + cluster: &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + }, + }, + { + name: "already deleted cluster returns nil", + cluster: &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gone-cluster", + Namespace: "default", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objects...).Build() + + err := deleteCNPGCluster(context.Background(), c, tt.cluster) + + require.NoError(t, err) + }) + } +} + +func TestPoolerExists(t *testing.T) { + scheme := runtime.NewScheme() + cnpgv1.AddToScheme(scheme) + enterprisev4.AddToScheme(scheme) + + cluster := &enterprisev4.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + } + + tests := []struct { + name string + objects []client.Object + expected bool + }{ + { + name: "returns true when pooler exists", + objects: []client.Object{ + &cnpgv1.Pooler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster-pooler-rw", + Namespace: "default", + }, + }, + }, + expected: true, + }, + { + name: "returns false when given pooler is not found", + objects: []client.Object{ + &cnpgv1.Pooler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster-pooler-ro", + Namespace: "default", + }, + }, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := 
fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objects...).Build() + + got := poolerExists(context.Background(), c, cluster, "rw") + + assert.Equal(t, tt.expected, got) + }) + } +} + +func TestDeleteConnectionPoolers(t *testing.T) { + scheme := runtime.NewScheme() + cnpgv1.AddToScheme(scheme) + enterprisev4.AddToScheme(scheme) + + cluster := &enterprisev4.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + } + + rwPooler := &cnpgv1.Pooler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster-pooler-rw", + Namespace: "default", + }, + } + roPooler := &cnpgv1.Pooler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster-pooler-ro", + Namespace: "default", + }, + } + + t.Run("deletes both poolers when they exist", func(t *testing.T) { + c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(rwPooler.DeepCopy(), roPooler.DeepCopy()).Build() + + err := deleteConnectionPoolers(context.Background(), c, cluster) + + require.NoError(t, err) + assert.True(t, apierrors.IsNotFound(c.Get(context.Background(), client.ObjectKey{Name: "my-cluster-pooler-rw", Namespace: "default"}, &cnpgv1.Pooler{}))) + assert.True(t, apierrors.IsNotFound(c.Get(context.Background(), client.ObjectKey{Name: "my-cluster-pooler-ro", Namespace: "default"}, &cnpgv1.Pooler{}))) + }) + + t.Run("no-op when no poolers exist", func(t *testing.T) { + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + err := deleteConnectionPoolers(context.Background(), c, cluster) + + require.NoError(t, err) + }) +} + +func TestEnsureClusterSecret(t *testing.T) { + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + enterprisev4.AddToScheme(scheme) + + t.Run("creates secret with credentials and owner reference", func(t *testing.T) { + c := fake.NewClientBuilder().WithScheme(scheme).Build() + cluster := &enterprisev4.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + UID: "cluster-uid", + }, + } + + 
err := ensureClusterSecret(context.Background(), c, scheme, cluster, "my-secret", &corev1.Secret{}) + + require.NoError(t, err) + secret := &corev1.Secret{} + require.NoError(t, c.Get(context.Background(), client.ObjectKey{Name: "my-secret", Namespace: "default"}, secret)) + assert.Equal(t, "my-secret", secret.Name) + assert.Equal(t, "default", secret.Namespace) + assert.Equal(t, corev1.SecretTypeOpaque, secret.Type) + require.Len(t, secret.OwnerReferences, 1) + assert.Equal(t, "cluster-uid", string(secret.OwnerReferences[0].UID)) + }) + + t.Run("no-op when secret already exists", func(t *testing.T) { + existing := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-secret", + Namespace: "default", + }, + StringData: map[string]string{"username": "existing-user"}, + } + c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(existing).Build() + cluster := &enterprisev4.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + UID: "cluster-uid", + }, + } + + err := ensureClusterSecret(context.Background(), c, scheme, cluster, "my-secret", &corev1.Secret{}) + + require.NoError(t, err) + }) +} + +func TestArePoolersReady(t *testing.T) { + makePooler := func(desired, actual int32) *cnpgv1.Pooler { + return &cnpgv1.Pooler{ + Spec: cnpgv1.PoolerSpec{Instances: ptr.To(desired)}, + Status: cnpgv1.PoolerStatus{Instances: actual}, + } + } + + tests := []struct { + name string + rw *cnpgv1.Pooler + ro *cnpgv1.Pooler + expected bool + }{ + { + name: "returns true when both poolers are ready", + rw: makePooler(2, 2), + ro: makePooler(2, 2), + expected: true, + }, + { + name: "returns false when rw pooler not ready", + rw: makePooler(2, 0), + ro: makePooler(2, 2), + expected: false, + }, + { + name: "returns false when ro pooler not ready", + rw: makePooler(2, 2), + ro: makePooler(2, 1), + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := arePoolersReady(tt.rw, tt.ro) + + 
assert.Equal(t, tt.expected, got) + }) + } +} + +func TestCreateConnectionPooler(t *testing.T) { + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + cnpgv1.AddToScheme(scheme) + enterprisev4.AddToScheme(scheme) + + poolerInstances := int32(2) + poolerMode := enterprisev4.ConnectionPoolerModeTransaction + cluster := &enterprisev4.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + UID: "cluster-uid", + }, + } + cnpg := &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + } + cfg := &MergedConfig{ + CNPG: &enterprisev4.CNPGConfig{ + ConnectionPooler: &enterprisev4.ConnectionPoolerConfig{ + Instances: &poolerInstances, + Mode: &poolerMode, + Config: map[string]string{"default_pool_size": "25"}, + }, + }, + } + + tests := []struct { + name string + objects []client.Object + expectInstances int32 + }{ + { + name: "creates pooler when it does not exist", + objects: nil, + expectInstances: 2, + }, + { + name: "no-op when pooler already exists", + objects: []client.Object{ + &cnpgv1.Pooler{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster-pooler-rw", + Namespace: "default", + }, + Spec: cnpgv1.PoolerSpec{Instances: ptr.To(int32(1))}, + }, + }, + expectInstances: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objects...).Build() + + err := createConnectionPooler(context.Background(), c, scheme, cluster.DeepCopy(), cfg, cnpg, "rw") + + require.NoError(t, err) + fetched := &cnpgv1.Pooler{} + require.NoError(t, c.Get(context.Background(), client.ObjectKey{Name: "my-cluster-pooler-rw", Namespace: "default"}, fetched)) + require.NotNil(t, fetched.Spec.Instances) + assert.Equal(t, tt.expectInstances, *fetched.Spec.Instances) + }) + } +} + +func TestGenerateConfigMap(t *testing.T) { + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + cnpgv1.AddToScheme(scheme) + 
enterprisev4.AddToScheme(scheme) + + cluster := &enterprisev4.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + UID: "cluster-uid", + }, + } + cnpgCluster := &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + } + + t.Run("base endpoints without poolers", func(t *testing.T) { + c := fake.NewClientBuilder().WithScheme(scheme).Build() + cm, err := generateConfigMap(context.Background(), c, scheme, cluster.DeepCopy(), cnpgCluster, "my-secret") + + require.NoError(t, err) + assert.Equal(t, "my-cluster-configmap", cm.Name) + assert.Equal(t, "default", cm.Namespace) + assert.Equal(t, "my-cluster-rw.default", cm.Data["CLUSTER_RW_ENDPOINT"]) + assert.Equal(t, "my-cluster-ro.default", cm.Data["CLUSTER_RO_ENDPOINT"]) + assert.Equal(t, "my-cluster-r.default", cm.Data["CLUSTER_R_ENDPOINT"]) + assert.Equal(t, "5432", cm.Data["DEFAULT_CLUSTER_PORT"]) + assert.Equal(t, "postgres", cm.Data["SUPER_USER_NAME"]) + assert.Equal(t, "my-secret", cm.Data["SUPER_USER_SECRET_REF"]) + assert.NotContains(t, cm.Data, "CLUSTER_POOLER_RW_ENDPOINT") + require.Len(t, cm.OwnerReferences, 1) + assert.Equal(t, "cluster-uid", string(cm.OwnerReferences[0].UID)) + }) + + t.Run("includes pooler endpoints when poolers exist", func(t *testing.T) { + rwPooler := &cnpgv1.Pooler{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cluster-pooler-rw", Namespace: "default"}, + } + roPooler := &cnpgv1.Pooler{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cluster-pooler-ro", Namespace: "default"}, + } + c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(rwPooler, roPooler).Build() + cm, err := generateConfigMap(context.Background(), c, scheme, cluster.DeepCopy(), cnpgCluster, "my-secret") + + require.NoError(t, err) + assert.Equal(t, "my-cluster-pooler-rw.default", cm.Data["CLUSTER_POOLER_RW_ENDPOINT"]) + assert.Equal(t, "my-cluster-pooler-ro.default", cm.Data["CLUSTER_POOLER_RO_ENDPOINT"]) + }) + + t.Run("uses existing 
configmap name from status", func(t *testing.T) { + c := fake.NewClientBuilder().WithScheme(scheme).Build() + pg := cluster.DeepCopy() + pg.Status.Resources = &enterprisev4.PostgresClusterResources{ + ConfigMapRef: &corev1.LocalObjectReference{Name: "custom-configmap"}, + } + + cm, err := generateConfigMap(context.Background(), c, scheme, pg, cnpgCluster, "my-secret") + + require.NoError(t, err) + assert.Equal(t, "custom-configmap", cm.Name) + }) +} + +func TestPoolerInstanceCount(t *testing.T) { + tests := []struct { + name string + pooler *cnpgv1.Pooler + expectedDesired int32 + expectedScheduled int32 + }{ + { + name: "nil instances defaults desired to 1", + pooler: &cnpgv1.Pooler{ + Status: cnpgv1.PoolerStatus{Instances: 3}, + }, + expectedDesired: 1, + expectedScheduled: 3, + }, + { + name: "explicit instances returns spec value", + pooler: &cnpgv1.Pooler{ + Spec: cnpgv1.PoolerSpec{Instances: ptr.To(int32(5))}, + Status: cnpgv1.PoolerStatus{Instances: 2}, + }, + expectedDesired: 5, + expectedScheduled: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + desired, scheduled := poolerInstanceCount(tt.pooler) + + assert.Equal(t, tt.expectedDesired, desired) + assert.Equal(t, tt.expectedScheduled, scheduled) + }) + } +} + +func TestGeneratePassword(t *testing.T) { + pw, err := generatePassword() + + require.NoError(t, err) + assert.Len(t, pw, 32) + + t.Run("generates unique passwords", func(t *testing.T) { + pw2, err := generatePassword() + + require.NoError(t, err) + assert.NotEqual(t, pw, pw2) + }) +} + +func TestCreateOrUpdateConnectionPoolers(t *testing.T) { + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + cnpgv1.AddToScheme(scheme) + enterprisev4.AddToScheme(scheme) + + poolerInstances := int32(2) + poolerMode := enterprisev4.ConnectionPoolerModeTransaction + cluster := &enterprisev4.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + UID: "cluster-uid", + }, + } + 
cnpgCluster := &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "my-cluster", + Namespace: "default", + }, + } + cfg := &MergedConfig{ + CNPG: &enterprisev4.CNPGConfig{ + ConnectionPooler: &enterprisev4.ConnectionPoolerConfig{ + Instances: &poolerInstances, + Mode: &poolerMode, + Config: map[string]string{"default_pool_size": "25"}, + }, + }, + } + + expectedPoolerSpec := func(poolerType string) cnpgv1.PoolerSpec { + return cnpgv1.PoolerSpec{ + Cluster: cnpgv1.LocalObjectReference{Name: "my-cluster"}, + Instances: ptr.To(int32(2)), + Type: cnpgv1.PoolerType(poolerType), + PgBouncer: &cnpgv1.PgBouncerSpec{ + PoolMode: cnpgv1.PgBouncerPoolMode("transaction"), + Parameters: map[string]string{"default_pool_size": "25"}, + }, + } + } + + t.Run("creates both rw and ro poolers", func(t *testing.T) { + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + err := createOrUpdateConnectionPoolers(context.Background(), c, scheme, cluster.DeepCopy(), cfg, cnpgCluster) + + require.NoError(t, err) + + rw := &cnpgv1.Pooler{} + require.NoError(t, c.Get(context.Background(), client.ObjectKey{Name: "my-cluster-pooler-rw", Namespace: "default"}, rw)) + assert.Equal(t, expectedPoolerSpec("rw"), rw.Spec) + require.Len(t, rw.OwnerReferences, 1) + assert.Equal(t, "cluster-uid", string(rw.OwnerReferences[0].UID)) + + ro := &cnpgv1.Pooler{} + require.NoError(t, c.Get(context.Background(), client.ObjectKey{Name: "my-cluster-pooler-ro", Namespace: "default"}, ro)) + assert.Equal(t, expectedPoolerSpec("ro"), ro.Spec) + require.Len(t, ro.OwnerReferences, 1) + assert.Equal(t, "cluster-uid", string(ro.OwnerReferences[0].UID)) + }) + + t.Run("no-op when both poolers already exist", func(t *testing.T) { + existing := []client.Object{ + &cnpgv1.Pooler{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cluster-pooler-rw", Namespace: "default"}, + Spec: cnpgv1.PoolerSpec{Instances: ptr.To(int32(1))}, + }, + &cnpgv1.Pooler{ + ObjectMeta: metav1.ObjectMeta{Name: "my-cluster-pooler-ro", Namespace: 
"default"}, + Spec: cnpgv1.PoolerSpec{Instances: ptr.To(int32(1))}, + }, + } + c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(existing...).Build() + + err := createOrUpdateConnectionPoolers(context.Background(), c, scheme, cluster.DeepCopy(), cfg, cnpgCluster) + + require.NoError(t, err) + rw := &cnpgv1.Pooler{} + require.NoError(t, c.Get(context.Background(), client.ObjectKey{Name: "my-cluster-pooler-rw", Namespace: "default"}, rw)) + assert.Equal(t, int32(1), *rw.Spec.Instances) + ro := &cnpgv1.Pooler{} + require.NoError(t, c.Get(context.Background(), client.ObjectKey{Name: "my-cluster-pooler-ro", Namespace: "default"}, ro)) + assert.Equal(t, int32(1), *ro.Spec.Instances) + }) +} diff --git a/pkg/postgresql/cluster/core/types.go b/pkg/postgresql/cluster/core/types.go new file mode 100644 index 000000000..19886fd73 --- /dev/null +++ b/pkg/postgresql/cluster/core/types.go @@ -0,0 +1,102 @@ +package core + +import ( + "time" + + enterprisev4 "github.com/splunk/splunk-operator/api/v4" + corev1 "k8s.io/api/core/v1" +) + +// normalizedCNPGClusterSpec is a subset of cnpgv1.ClusterSpec fields used for drift detection. +// Only fields we set in buildCNPGClusterSpec are included — CNPG-injected defaults are excluded +// to avoid false-positive drift on every reconcile. +type normalizedCNPGClusterSpec struct { + ImageName string + Instances int + CustomDefinedParameters map[string]string + PgHBA []string + DefaultDatabase string + Owner string + StorageSize string + Resources corev1.ResourceRequirements +} + +// MergedConfig is the resolved configuration after overlaying PostgresCluster on PostgresClusterClass defaults. 
+type MergedConfig struct { + Spec *enterprisev4.PostgresClusterSpec + CNPG *enterprisev4.CNPGConfig +} + +type reconcileClusterPhases string +type conditionTypes string +type conditionReasons string +type objectKind string + +const ( + retryDelay = time.Second * 15 + + readOnlyEndpoint string = "ro" + readWriteEndpoint string = "rw" + + defaultDatabaseName string = "postgres" + superUsername string = "postgres" + defaultPort string = "5432" + + secretKeyPassword string = "password" + defaultSecretSuffix string = "-secret" + defaultPoolerSuffix string = "-pooler-" + defaultConfigMapSuffix string = "-configmap" + + clusterDeletionPolicyDelete string = "Delete" + clusterDeletionPolicyRetain string = "Retain" + + // PostgresClusterFinalizerName is exported so the primary adapter (controller) can + // reference it in event predicates without duplicating the string. + PostgresClusterFinalizerName string = "postgresclusters.enterprise.splunk.com/finalizer" + + // cluster phases + readyClusterPhase reconcileClusterPhases = "Ready" + pendingClusterPhase reconcileClusterPhases = "Pending" + provisioningClusterPhase reconcileClusterPhases = "Provisioning" + configuringClusterPhase reconcileClusterPhases = "Configuring" + failedClusterPhase reconcileClusterPhases = "Failed" + + // condition types + clusterReady conditionTypes = "ClusterReady" + poolerReady conditionTypes = "PoolerReady" + + // condition reasons — clusterReady + reasonClusterClassNotFound conditionReasons = "ClusterClassNotFound" + reasonManagedRolesFailed conditionReasons = "ManagedRolesReconciliationFailed" + reasonClusterBuildFailed conditionReasons = "ClusterBuildFailed" + reasonClusterBuildSucceeded conditionReasons = "ClusterBuildSucceeded" + reasonClusterGetFailed conditionReasons = "ClusterGetFailed" + reasonClusterPatchFailed conditionReasons = "ClusterPatchFailed" + reasonInvalidConfiguration conditionReasons = "InvalidConfiguration" + reasonConfigMapFailed conditionReasons = 
"ConfigMapReconciliationFailed" + reasonUserSecretFailed conditionReasons = "UserSecretReconciliationFailed" + reasonSuperUserSecretFailed conditionReasons = "SuperUserSecretFailed" + reasonClusterDeleteFailed conditionReasons = "ClusterDeleteFailed" + + // condition reasons — poolerReady + reasonPoolerReconciliationFailed conditionReasons = "PoolerReconciliationFailed" + reasonPoolerConfigMissing conditionReasons = "PoolerConfigMissing" + reasonPoolerCreating conditionReasons = "PoolerCreating" + reasonAllInstancesReady conditionReasons = "AllInstancesReady" + + // condition reasons — CNPG cluster phase mapping + reasonCNPGClusterNotHealthy conditionReasons = "CNPGClusterNotHealthy" + reasonCNPGClusterHealthy conditionReasons = "CNPGClusterHealthy" + reasonCNPGProvisioning conditionReasons = "CNPGClusterProvisioning" + reasonCNPGSwitchover conditionReasons = "CNPGSwitchover" + reasonCNPGFailingOver conditionReasons = "CNPGFailingOver" + reasonCNPGRestarting conditionReasons = "CNPGRestarting" + reasonCNPGUpgrading conditionReasons = "CNPGUpgrading" + reasonCNPGApplyingConfig conditionReasons = "CNPGApplyingConfiguration" + reasonCNPGPromoting conditionReasons = "CNPGPromoting" + reasonCNPGWaitingForUser conditionReasons = "CNPGWaitingForUser" + reasonCNPGUnrecoverable conditionReasons = "CNPGUnrecoverable" + reasonCNPGProvisioningFailed conditionReasons = "CNPGProvisioningFailed" + reasonCNPGPluginError conditionReasons = "CNPGPluginError" + reasonCNPGImageError conditionReasons = "CNPGImageError" +) diff --git a/pkg/postgresql/database/adapter/db_repository.go b/pkg/postgresql/database/adapter/db_repository.go new file mode 100644 index 000000000..0b23f685c --- /dev/null +++ b/pkg/postgresql/database/adapter/db_repository.go @@ -0,0 +1,80 @@ +// Package adapter contains driven adapters for the PostgresDatabase domain. +// Each adapter implements a port defined in core/ports.go. 
+package adapter
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	dbcore "github.com/splunk/splunk-operator/pkg/postgresql/database/core"
+
+	"github.com/jackc/pgx/v5"
+)
+
+const (
+	superUsername    = "postgres"
+	postgresPort     = "5432"
+	dbConnectTimeout = 10 * time.Second
+)
+
+// pgDBRepository is the pgx-backed adapter for the core.DBRepo port.
+// It owns the full connection lifecycle: open on construction, close on ExecGrants return.
+type pgDBRepository struct {
+	conn *pgx.Conn
+}
+
+// ExecGrants applies all privilege grants needed for the RW role on a single database.
+// GRANT ON ALL TABLES/SEQUENCES covers existing objects; ALTER DEFAULT PRIVILEGES covers
+// future ones created by the admin role (e.g. via migrations).
+//
+// All statements run in one transaction so a partial grant set is never left behind.
+// The connection is single-use and is closed on return regardless of outcome.
+func (r *pgDBRepository) ExecGrants(ctx context.Context, dbName string) error {
+	defer r.conn.Close(context.Background())
+
+	// Identifiers cannot be parameterised in PostgreSQL, so they are interpolated
+	// into the statements below. dbName ultimately originates from the
+	// PostgresDatabase CR spec (user-controlled input), so quote every derived
+	// identifier with pgx.Identifier.Sanitize instead of splicing it in raw.
+	db := pgx.Identifier{dbName}.Sanitize()
+	adminRole := pgx.Identifier{dbName + "_admin"}.Sanitize()
+	rwRole := pgx.Identifier{dbName + "_rw"}.Sanitize()
+
+	tx, err := r.conn.Begin(ctx)
+	if err != nil {
+		return fmt.Errorf("beginning transaction: %w", err)
+	}
+	// Rollback after a successful Commit is a harmless no-op; this guarantees the
+	// transaction is not left open when an Exec below fails. The error is ignored
+	// deliberately — the interesting error is the one already being returned.
+	defer tx.Rollback(ctx) //nolint:errcheck
+
+	stmts := []string{
+		fmt.Sprintf("GRANT CONNECT ON DATABASE %s TO %s", db, rwRole),
+		fmt.Sprintf("GRANT USAGE ON SCHEMA public TO %s", rwRole),
+		fmt.Sprintf("GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO %s", rwRole),
+		fmt.Sprintf("GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA public TO %s", rwRole),
+		fmt.Sprintf("ALTER DEFAULT PRIVILEGES FOR ROLE %s IN SCHEMA public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO %s", adminRole, rwRole),
+		fmt.Sprintf("ALTER DEFAULT PRIVILEGES FOR ROLE %s IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO %s", adminRole, rwRole),
+	}
+
+	for _, stmt := range stmts {
+		if _, err := tx.Exec(ctx, stmt); err != nil {
+			return fmt.Errorf("executing grant %q: %w", stmt, err)
+		}
+	}
+
+	return tx.Commit(ctx)
+}
+
+// NewDBRepository opens a direct superuser connection, bypassing any pooler.
+// PgBouncer in transaction mode blocks DDL; password is set on the config
+// struct to avoid URL-encoding issues with special characters.
+func NewDBRepository(ctx context.Context, host, dbName, password string) (dbcore.DBRepo, error) {
+	// NOTE(review): dbName is spliced into the URL path unescaped — this assumes
+	// it is constrained to K8s-resource-name characters upstream; confirm the CRD
+	// validation on PostgresDatabase.spec.databases[].name.
+	cfg, err := pgx.ParseConfig(fmt.Sprintf(
+		"postgres://%s@%s:%s/%s?sslmode=require&connect_timeout=%d",
+		superUsername, host, postgresPort, dbName,
+		int(dbConnectTimeout.Seconds()),
+	))
+	if err != nil {
+		return nil, fmt.Errorf("parsing connection config for %s/%s: %w", host, dbName, err)
+	}
+	// Set out-of-band so special characters in the password never hit the URL parser.
+	cfg.Password = password
+
+	conn, err := pgx.ConnectConfig(ctx, cfg)
+	if err != nil {
+		return nil, fmt.Errorf("connecting to %s/%s: %w", host, dbName, err)
+	}
+	// Ownership of conn transfers to the repository; ExecGrants closes it.
+	return &pgDBRepository{conn: conn}, nil
+}
diff --git a/pkg/postgresql/database/core/database.go b/pkg/postgresql/database/core/database.go
new file mode 100644
index 000000000..50c99beed
--- /dev/null
+++ b/pkg/postgresql/database/core/database.go
@@ -0,0 +1,936 @@
+package core
+
+import (
+	"context"
+	"encoding/json"
+	stderrors "errors"
+	"fmt"
+	"slices"
+	"strings"
+
+	cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+	"github.com/sethvargo/go-password/password"
+	enterprisev4 "github.com/splunk/splunk-operator/api/v4"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// NewDBRepoFunc constructs a DBRepo adapter for the given host and database.
+// Injected by the controller so the core never imports the pgx adapter directly.
+type NewDBRepoFunc func(ctx context.Context, host, dbName, password string) (DBRepo, error)
+
+// PostgresDatabaseService is the application service entry point called by the primary adapter (reconciler).
+// newDBRepo is injected to keep the core free of pgx imports.
+//
+// The reconcile runs as an ordered phase pipeline (ClusterValidation,
+// RoleConflictCheck, CredentialProvisioning, ConnectionMetadata,
+// RoleProvisioning, DatabaseProvisioning, RWRolePrivileges); each phase records
+// a status condition before the next one runs, and a not-ready phase requeues.
+func PostgresDatabaseService(
+	ctx context.Context,
+	c client.Client,
+	scheme *runtime.Scheme,
+	postgresDB *enterprisev4.PostgresDatabase,
+	newDBRepo NewDBRepoFunc,
+) (ctrl.Result, error) {
+	logger := log.FromContext(ctx)
+	logger.Info("Reconciling PostgresDatabase", "name", postgresDB.Name, "namespace", postgresDB.Namespace)
+
+	// Closure over ctx/c/postgresDB so every phase can record a condition and
+	// phase transition with a one-liner.
+	updateStatus := func(conditionType conditionTypes, conditionStatus metav1.ConditionStatus, reason conditionReasons, message string, phase reconcileDBPhases) error {
+		return persistStatus(ctx, c, postgresDB, conditionType, conditionStatus, reason, message, phase)
+	}
+
+	// Finalizer: cleanup on deletion, register on creation.
+	if postgresDB.GetDeletionTimestamp() != nil {
+		if err := handleDeletion(ctx, c, postgresDB); err != nil {
+			logger.Error(err, "Cleanup failed for PostgresDatabase")
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{}, nil
+	}
+	if !controllerutil.ContainsFinalizer(postgresDB, postgresDatabaseFinalizerName) {
+		controllerutil.AddFinalizer(postgresDB, postgresDatabaseFinalizerName)
+		if err := c.Update(ctx, postgresDB); err != nil {
+			logger.Error(err, "Failed to add finalizer to PostgresDatabase")
+			return ctrl.Result{}, err
+		}
+		// The Update generates a fresh watch event; let that drive the real work.
+		return ctrl.Result{}, nil
+	}
+
+	// ObservedGeneration equality means all phases completed on the current spec — nothing to do.
+	if postgresDB.Status.ObservedGeneration != nil && *postgresDB.Status.ObservedGeneration == postgresDB.Generation {
+		logger.Info("Spec unchanged and all phases complete, skipping")
+		return ctrl.Result{}, nil
+	}
+
+	// Phase: ClusterValidation
+	cluster, err := fetchCluster(ctx, c, postgresDB)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			if err := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterNotFound, "Cluster CR not found", pendingDBPhase); err != nil {
+				return ctrl.Result{}, err
+			}
+			return ctrl.Result{RequeueAfter: clusterNotFoundRetryDelay}, nil
+		}
+		if statusErr := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterInfoFetchFailed,
+			"Can't reach Cluster CR due to transient errors", pendingDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		return ctrl.Result{}, err
+	}
+	clusterStatus := getClusterReadyStatus(cluster)
+	logger.Info("Cluster validation done", "clusterName", postgresDB.Spec.ClusterRef.Name, "status", clusterStatus)
+
+	switch clusterStatus {
+	case ClusterNotReady, ClusterNoProvisionerRef:
+		if err := updateStatus(clusterReady, metav1.ConditionFalse, reasonClusterProvisioning, "Cluster is not in ready state yet", pendingDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: retryDelay}, nil
+
+	case ClusterReady:
+		if err := updateStatus(clusterReady, metav1.ConditionTrue, reasonClusterAvailable, "Cluster is operational", provisioningDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+	}
+
+	// Phase: RoleConflictCheck — verify no other SSA field manager already owns our roles.
+	roleConflicts := getRoleConflicts(postgresDB, cluster)
+	if len(roleConflicts) > 0 {
+		conflictMsg := fmt.Sprintf("Role conflict: %s. "+
+			"If you deleted a previous PostgresDatabase, recreate it with the original name to re-adopt the orphaned resources.",
+			strings.Join(roleConflicts, ", "))
+		logger.Error(nil, conflictMsg)
+		if statusErr := updateStatus(rolesReady, metav1.ConditionFalse, reasonRoleConflict, conflictMsg, failedDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		// Terminal for this generation: no error and no requeue — operator
+		// intervention (renaming/recreating the CR) is required.
+		return ctrl.Result{}, nil
+	}
+
+	// We need the CNPG Cluster directly because PostgresCluster status does not yet
+	// surface managed role reconciliation state.
+	cnpgCluster := &cnpgv1.Cluster{}
+	if err := c.Get(ctx, types.NamespacedName{
+		Name:      cluster.Status.ProvisionerRef.Name,
+		Namespace: cluster.Status.ProvisionerRef.Namespace,
+	}, cnpgCluster); err != nil {
+		logger.Error(err, "Failed to fetch CNPG Cluster")
+		return ctrl.Result{}, err
+	}
+
+	// Phase: CredentialProvisioning — secrets must exist before roles are patched.
+	// CNPG rejects a PasswordSecretRef pointing at a missing secret.
+	if err := reconcileUserSecrets(ctx, c, scheme, postgresDB); err != nil {
+		if statusErr := updateStatus(secretsReady, metav1.ConditionFalse, reasonSecretsCreationFailed,
+			fmt.Sprintf("Failed to reconcile user secrets: %v", err), provisioningDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		return ctrl.Result{}, err
+	}
+	if err := updateStatus(secretsReady, metav1.ConditionTrue, reasonSecretsCreated,
+		fmt.Sprintf("All secrets provisioned for %d databases", len(postgresDB.Spec.Databases)), provisioningDBPhase); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Phase: ConnectionMetadata — ConfigMaps carry connection info consumers need as soon
+	// as databases are ready, so they are created alongside secrets.
+	endpoints := resolveClusterEndpoints(cluster, cnpgCluster, postgresDB.Namespace)
+	if err := reconcileRoleConfigMaps(ctx, c, scheme, postgresDB, endpoints); err != nil {
+		if statusErr := updateStatus(configMapsReady, metav1.ConditionFalse, reasonConfigMapsCreationFailed,
+			fmt.Sprintf("Failed to reconcile ConfigMaps: %v", err), provisioningDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		return ctrl.Result{}, err
+	}
+	if err := updateStatus(configMapsReady, metav1.ConditionTrue, reasonConfigMapsCreated,
+		fmt.Sprintf("All ConfigMaps provisioned for %d databases", len(postgresDB.Spec.Databases)), provisioningDBPhase); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Phase: RoleProvisioning
+	desiredUsers := getDesiredUsers(postgresDB)
+	actualRoles := getUsersInClusterSpec(cluster)
+	var missing []string
+	for _, role := range desiredUsers {
+		if !slices.Contains(actualRoles, role) {
+			missing = append(missing, role)
+		}
+	}
+
+	if len(missing) > 0 {
+		logger.Info("User spec changed, patching CNPG Cluster", "missing", missing)
+		if err := patchManagedRoles(ctx, c, postgresDB, cluster); err != nil {
+			logger.Error(err, "Failed to patch users in CNPG Cluster")
+			return ctrl.Result{}, err
+		}
+		if err := updateStatus(rolesReady, metav1.ConditionFalse, reasonWaitingForCNPG,
+			fmt.Sprintf("Waiting for %d roles to be reconciled", len(desiredUsers)), provisioningDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: retryDelay}, nil
+	}
+
+	notReadyRoles, err := verifyRolesReady(ctx, desiredUsers, cnpgCluster)
+	if err != nil {
+		if statusErr := updateStatus(rolesReady, metav1.ConditionFalse, reasonUsersCreationFailed,
+			fmt.Sprintf("Role creation failed: %v", err), failedDBPhase); statusErr != nil {
+			logger.Error(statusErr, "Failed to update status")
+		}
+		return ctrl.Result{}, err
+	}
+	if len(notReadyRoles) > 0 {
+		if err := updateStatus(rolesReady, metav1.ConditionFalse, reasonWaitingForCNPG,
+			fmt.Sprintf("Waiting for roles to be reconciled: %v", notReadyRoles), provisioningDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: retryDelay}, nil
+	}
+	if err := updateStatus(rolesReady, metav1.ConditionTrue, reasonUsersAvailable,
+		fmt.Sprintf("All %d users in PostgreSQL", len(desiredUsers)), provisioningDBPhase); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Phase: DatabaseProvisioning
+	if err := reconcileCNPGDatabases(ctx, c, scheme, postgresDB, cluster); err != nil {
+		logger.Error(err, "Failed to reconcile CNPG Databases")
+		return ctrl.Result{}, err
+	}
+
+	notReadyDBs, err := verifyDatabasesReady(ctx, c, postgresDB)
+	if err != nil {
+		logger.Error(err, "Failed to verify database status")
+		return ctrl.Result{}, err
+	}
+	if len(notReadyDBs) > 0 {
+		if err := updateStatus(databasesReady, metav1.ConditionFalse, reasonWaitingForCNPG,
+			fmt.Sprintf("Waiting for databases to be ready: %v", notReadyDBs), provisioningDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+		return ctrl.Result{RequeueAfter: retryDelay}, nil
+	}
+	if err := updateStatus(databasesReady, metav1.ConditionTrue, reasonDatabasesAvailable,
+		fmt.Sprintf("All %d databases ready", len(postgresDB.Spec.Databases)), readyDBPhase); err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Phase: RWRolePrivileges
+	// Skipped when no new databases are detected — ALTER DEFAULT PRIVILEGES covers tables
+	// added by migrations on existing databases. Re-runs for all databases when a new one
+	// is added (idempotent for existing ones, required for the new one).
+	if hasNewDatabases(postgresDB) {
+		// Read from our own status — we created this secret and wrote the SecretKeySelector
+		// (name + key) when the cluster was provisioned. This avoids depending on CNPG's
+		// spec field and makes the key explicit.
+		if cluster.Status.Resources == nil || cluster.Status.Resources.SuperUserSecretRef == nil {
+			return ctrl.Result{}, fmt.Errorf("PostgresCluster %s has no superuser secret ref in status", cluster.Name)
+		}
+		superSecretRef := cluster.Status.Resources.SuperUserSecretRef
+		superSecret := &corev1.Secret{}
+		if err := c.Get(ctx, types.NamespacedName{
+			Name:      superSecretRef.Name,
+			Namespace: postgresDB.Namespace,
+		}, superSecret); err != nil {
+			return ctrl.Result{}, fmt.Errorf("fetching superuser secret %s: %w", superSecretRef.Name, err)
+		}
+		pw, ok := superSecret.Data[superSecretRef.Key]
+		if !ok {
+			return ctrl.Result{}, fmt.Errorf("superuser secret %s missing %q key", superSecretRef.Name, superSecretRef.Key)
+		}
+
+		dbNames := make([]string, 0, len(postgresDB.Spec.Databases))
+		for _, dbSpec := range postgresDB.Spec.Databases {
+			dbNames = append(dbNames, dbSpec.Name)
+		}
+
+		if err := reconcileRWRolePrivileges(ctx, endpoints.RWHost, string(pw), dbNames, newDBRepo); err != nil {
+			if statusErr := updateStatus(privilegesReady, metav1.ConditionFalse, reasonPrivilegesGrantFailed,
+				fmt.Sprintf("Failed to grant RW role privileges: %v", err), provisioningDBPhase); statusErr != nil {
+				logger.Error(statusErr, "Failed to update status")
+			}
+			return ctrl.Result{}, err
+		}
+		if err := updateStatus(privilegesReady, metav1.ConditionTrue, reasonPrivilegesGranted,
+			fmt.Sprintf("RW role privileges granted for all %d databases", len(postgresDB.Spec.Databases)), readyDBPhase); err != nil {
+			return ctrl.Result{}, err
+		}
+	}
+
+	// Record completion for this generation so the early-exit above fires next time.
+	postgresDB.Status.Databases = populateDatabaseStatus(postgresDB)
+	postgresDB.Status.ObservedGeneration = &postgresDB.Generation
+
+	if err := c.Status().Update(ctx, postgresDB); err != nil {
+		if errors.IsConflict(err) {
+			// Lost an optimistic-concurrency race on status; retry with a fresh read.
+			return ctrl.Result{Requeue: true}, nil
+		}
+		return ctrl.Result{}, fmt.Errorf("persisting final status: %w", err)
+	}
+
+	logger.Info("All phases complete")
+	return ctrl.Result{}, nil
+}
+
+// reconcileRWRolePrivileges calls the
 DBRepo port for each database.
+// Errors are collected so all databases are attempted before returning.
+func reconcileRWRolePrivileges(
+	ctx context.Context,
+	rwHost, superPassword string,
+	dbNames []string,
+	newDBRepo NewDBRepoFunc,
+) error {
+	logger := log.FromContext(ctx)
+	var errs []error
+	for _, dbName := range dbNames {
+		// A fresh repo per database: the adapter closes its connection when
+		// ExecGrants returns.
+		repo, err := newDBRepo(ctx, rwHost, dbName, superPassword)
+		if err != nil {
+			logger.Error(err, "Failed to connect to database", "database", dbName)
+			errs = append(errs, fmt.Errorf("database %s: %w", dbName, err))
+			continue
+		}
+		if err := repo.ExecGrants(ctx, dbName); err != nil {
+			logger.Error(err, "Failed to grant RW role privileges", "database", dbName)
+			errs = append(errs, fmt.Errorf("database %s: %w", dbName, err))
+			continue
+		}
+		logger.Info("RW role privileges granted", "database", dbName, "rwRole", rwRoleName(dbName))
+	}
+	return stderrors.Join(errs...)
+}
+
+// fetchCluster loads the PostgresCluster referenced by the PostgresDatabase spec
+// from the same namespace. NotFound is returned unwrapped so callers can branch
+// on errors.IsNotFound.
+func fetchCluster(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase) (*enterprisev4.PostgresCluster, error) {
+	logger := log.FromContext(ctx)
+	cluster := &enterprisev4.PostgresCluster{}
+	if err := c.Get(ctx, types.NamespacedName{Name: postgresDB.Spec.ClusterRef.Name, Namespace: postgresDB.Namespace}, cluster); err != nil {
+		if errors.IsNotFound(err) {
+			return nil, err
+		}
+		logger.Error(err, "Failed to fetch Cluster", "name", postgresDB.Spec.ClusterRef.Name)
+		return nil, err
+	}
+	return cluster, nil
+}
+
+// getClusterReadyStatus classifies the cluster: not Ready, Ready but with no
+// provisioner reference yet, or fully Ready.
+func getClusterReadyStatus(cluster *enterprisev4.PostgresCluster) clusterReadyStatus {
+	if cluster.Status.Phase == nil || *cluster.Status.Phase != string(ClusterReady) {
+		return ClusterNotReady
+	}
+	if cluster.Status.ProvisionerRef == nil {
+		return ClusterNoProvisionerRef
+	}
+	return ClusterReady
+}
+
+// getDesiredUsers returns the admin and rw role names for every database in the spec.
+func getDesiredUsers(postgresDB *enterprisev4.PostgresDatabase) []string {
+	users := make([]string, 0, len(postgresDB.Spec.Databases)*2)
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		users = append(users, adminRoleName(dbSpec.Name), rwRoleName(dbSpec.Name))
+	}
+	return users
+}
+
+// getUsersInClusterSpec lists the role names currently declared on the PostgresCluster.
+func getUsersInClusterSpec(cluster *enterprisev4.PostgresCluster) []string {
+	users := make([]string, 0, len(cluster.Spec.ManagedRoles))
+	for _, role := range cluster.Spec.ManagedRoles {
+		users = append(users, role.Name)
+	}
+	return users
+}
+
+// getRoleConflicts reports desired roles whose spec.managedRoles entries are
+// owned (per server-side-apply managedFields) by a different field manager.
+// NOTE(review): iteration over the `desired` map makes the ordering of the
+// returned conflict list nondeterministic, so the resulting status message can
+// flap between reconciles — consider sorting before use.
+func getRoleConflicts(postgresDB *enterprisev4.PostgresDatabase, cluster *enterprisev4.PostgresCluster) []string {
+	myManager := fieldManagerName(postgresDB.Name)
+	desired := make(map[string]struct{}, len(postgresDB.Spec.Databases)*2)
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		desired[adminRoleName(dbSpec.Name)] = struct{}{}
+		desired[rwRoleName(dbSpec.Name)] = struct{}{}
+	}
+	roleOwners := managedRoleOwners(cluster.ManagedFields)
+	var conflicts []string
+	for roleName := range desired {
+		if owner, exists := roleOwners[roleName]; exists && owner != myManager {
+			conflicts = append(conflicts, fmt.Sprintf("%s (owned by %s)", roleName, owner))
+		}
+	}
+	return conflicts
+}
+
+// managedRoleOwners maps each managed-role name found in managedFields to the
+// field manager that owns it. Entries without FieldsV1 payloads are skipped.
+func managedRoleOwners(managedFields []metav1.ManagedFieldsEntry) map[string]string {
+	owners := make(map[string]string)
+	for _, mf := range managedFields {
+		if mf.FieldsV1 == nil {
+			continue
+		}
+		for _, name := range parseRoleNames(mf.FieldsV1.Raw) {
+			owners[name] = mf.Manager
+		}
+	}
+	return owners
+}
+
+// parseRoleNames extracts role names from a FieldsV1 JSON blob by reading the
+// `k:{"name":...}` list keys under f:spec.f:managedRoles. Malformed input
+// yields nil — ownership tracking is best-effort.
+func parseRoleNames(raw []byte) []string {
+	var fields map[string]any
+	if err := json.Unmarshal(raw, &fields); err != nil {
+		return nil
+	}
+	spec, _ := fields["f:spec"].(map[string]any)
+	roles, _ := spec["f:managedRoles"].(map[string]any)
+	var names []string
+	for key := range roles {
+		var k struct{ Name string }
+		if err := json.Unmarshal([]byte(strings.TrimPrefix(key, "k:")), &k); err == nil && k.Name != "" {
+			names = append(names, k.Name)
+		}
+	}
+	return names
+}
+
+// patchManagedRoles server-side-applies this PostgresDatabase's full role set
+// onto the PostgresCluster under its own field manager.
+func patchManagedRoles(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, cluster *enterprisev4.PostgresCluster) error {
+	logger :=
 log.FromContext(ctx)
+	allRoles := buildManagedRoles(postgresDB.Name, postgresDB.Spec.Databases)
+	rolePatch := buildManagedRolesPatch(cluster, allRoles)
+	fieldManager := fieldManagerName(postgresDB.Name)
+	if err := c.Patch(ctx, rolePatch, client.Apply, client.FieldOwner(fieldManager)); err != nil {
+		logger.Error(err, "Failed to add users to PostgresCluster", "postgresDatabase", postgresDB.Name)
+		return fmt.Errorf("patching managed roles for PostgresDatabase %s: %w", postgresDB.Name, err)
+	}
+	logger.Info("Users added to PostgresCluster via SSA", "postgresDatabase", postgresDB.Name, "roleCount", len(allRoles))
+	return nil
+}
+
+// verifyRolesReady returns the subset of expectedUsers CNPG has not yet marked
+// reconciled. A user present in CannotReconcile is a hard error, not a retry.
+func verifyRolesReady(ctx context.Context, expectedUsers []string, cnpgCluster *cnpgv1.Cluster) ([]string, error) {
+	logger := log.FromContext(ctx)
+	if cnpgCluster.Status.ManagedRolesStatus.CannotReconcile != nil {
+		for _, userName := range expectedUsers {
+			if errs, exists := cnpgCluster.Status.ManagedRolesStatus.CannotReconcile[userName]; exists {
+				return nil, fmt.Errorf("user %s reconciliation failed: %v", userName, errs)
+			}
+		}
+	}
+	reconciled := cnpgCluster.Status.ManagedRolesStatus.ByStatus[cnpgv1.RoleStatusReconciled]
+	var notReady []string
+	for _, userName := range expectedUsers {
+		if !slices.Contains(reconciled, userName) {
+			notReady = append(notReady, userName)
+		}
+	}
+	if len(notReady) > 0 {
+		logger.Info("Users not reconciled yet", "pending", notReady)
+	}
+	return notReady, nil
+}
+
+// reconcileCNPGDatabases creates or updates one CNPG Database CR per database
+// in the spec. The controller reference is set on first creation and when
+// re-adopting a CR that a previous (deleted) PostgresDatabase left orphaned.
+func reconcileCNPGDatabases(ctx context.Context, c client.Client, scheme *runtime.Scheme, postgresDB *enterprisev4.PostgresDatabase, cluster *enterprisev4.PostgresCluster) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		cnpgDBName := cnpgDatabaseName(postgresDB.Name, dbSpec.Name)
+		cnpgDB := &cnpgv1.Database{
+			ObjectMeta: metav1.ObjectMeta{Name: cnpgDBName, Namespace: postgresDB.Namespace},
+		}
+		_, err := controllerutil.CreateOrUpdate(ctx, c, cnpgDB, func() error {
+			cnpgDB.Spec = buildCNPGDatabaseSpec(cluster.Status.ProvisionerRef.Name, dbSpec)
+			reAdopting := cnpgDB.Annotations[annotationRetainedFrom] == postgresDB.Name
+			if reAdopting {
+				logger.Info("Re-adopting orphaned CNPG Database", "name", cnpgDBName)
+				delete(cnpgDB.Annotations, annotationRetainedFrom)
+			}
+			if cnpgDB.CreationTimestamp.IsZero() || reAdopting {
+				return controllerutil.SetControllerReference(postgresDB, cnpgDB, scheme)
+			}
+			return nil
+		})
+		if err != nil {
+			return fmt.Errorf("reconciling CNPG Database %s: %w", cnpgDBName, err)
+		}
+	}
+	return nil
+}
+
+// verifyDatabasesReady returns the names of databases whose CNPG Database CR
+// does not yet report status.Applied == true.
+func verifyDatabasesReady(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase) ([]string, error) {
+	var notReady []string
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		cnpgDBName := cnpgDatabaseName(postgresDB.Name, dbSpec.Name)
+		cnpgDB := &cnpgv1.Database{}
+		if err := c.Get(ctx, types.NamespacedName{Name: cnpgDBName, Namespace: postgresDB.Namespace}, cnpgDB); err != nil {
+			return nil, fmt.Errorf("getting CNPG Database %s: %w", cnpgDBName, err)
+		}
+		if cnpgDB.Status.Applied == nil || !*cnpgDB.Status.Applied {
+			notReady = append(notReady, dbSpec.Name)
+		}
+	}
+	return notReady, nil
+}
+
+// persistStatus mutates the in-memory status via applyStatus and writes it back
+// through the status subresource.
+func persistStatus(ctx context.Context, c client.Client, db *enterprisev4.PostgresDatabase, conditionType conditionTypes, conditionStatus metav1.ConditionStatus, reason conditionReasons, message string, phase reconcileDBPhases) error {
+	applyStatus(db, conditionType, conditionStatus, reason, message, phase)
+	return c.Status().Update(ctx, db)
+}
+
+// applyStatus sets (or replaces) one condition and the overall phase on the
+// object's status; it does not write to the API server.
+func applyStatus(db *enterprisev4.PostgresDatabase, conditionType conditionTypes, conditionStatus metav1.ConditionStatus, reason conditionReasons, message string, phase reconcileDBPhases) {
+	meta.SetStatusCondition(&db.Status.Conditions, metav1.Condition{
+		Type:               string(conditionType),
+		Status:             conditionStatus,
+		Reason:             string(reason),
+		Message:            message,
+		ObservedGeneration: db.Generation,
+	})
+	p := string(phase)
+	db.Status.Phase = &p
+}
+
+// buildDeletionPlan splits the spec'd databases into those whose child
+// resources should be retained (DeletionPolicy == Retain) and those to delete.
+func buildDeletionPlan(databases []enterprisev4.DatabaseDefinition) deletionPlan {
+	var plan deletionPlan
+	for _, db := range databases {
+		if db.DeletionPolicy == deletionPolicyRetain {
+			plan.retained = append(plan.retained, db)
+		} else {
+			plan.deleted = append(plan.deleted, db)
+		}
+	}
+	return plan
+}
+
+// handleDeletion runs the finalizer logic: orphan retained resources, delete
+// the rest, withdraw this CR's managed roles from the cluster, then remove the
+// finalizer so Kubernetes can complete the delete.
+func handleDeletion(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase) error {
+	plan := buildDeletionPlan(postgresDB.Spec.Databases)
+	if err := orphanRetainedResources(ctx, c, postgresDB, plan.retained); err != nil {
+		return err
+	}
+	if err := deleteRemovedResources(ctx, c, postgresDB, plan.deleted); err != nil {
+		return err
+	}
+	if err := cleanupManagedRoles(ctx, c, postgresDB, plan); err != nil {
+		return err
+	}
+	controllerutil.RemoveFinalizer(postgresDB, postgresDatabaseFinalizerName)
+	if err := c.Update(ctx, postgresDB); err != nil {
+		// Already gone — deletion has effectively succeeded.
+		if errors.IsNotFound(err) {
+			return nil
+		}
+		return fmt.Errorf("removing finalizer: %w", err)
+	}
+	log.FromContext(ctx).Info("Cleanup complete", "name", postgresDB.Name, "retained", len(plan.retained), "deleted", len(plan.deleted))
+	return nil
+}
+
+// orphanRetainedResources detaches CNPG Databases, ConfigMaps and Secrets of
+// retained databases from this CR so they survive its deletion.
+func orphanRetainedResources(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, retained []enterprisev4.DatabaseDefinition) error {
+	if err := orphanCNPGDatabases(ctx, c, postgresDB, retained); err != nil {
+		return err
+	}
+	if err := orphanConfigMaps(ctx, c, postgresDB, retained); err != nil {
+		return err
+	}
+	return orphanSecrets(ctx, c, postgresDB, retained)
+}
+
+// deleteRemovedResources deletes the child resources of databases whose policy
+// is Delete.
+func deleteRemovedResources(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, deleted []enterprisev4.DatabaseDefinition) error {
+	if err := deleteCNPGDatabases(ctx, c, postgresDB, deleted); err != nil {
+		return err
+	}
+	if err := deleteConfigMaps(ctx, c, postgresDB, deleted); err != nil {
+		return err
+	}
+	return deleteSecrets(ctx, c, postgresDB, deleted)
+}
+
+// cleanupManagedRoles re-applies only the retained roles to the PostgresCluster,
+// dropping this CR's ownership of the deleted ones. Skipped when nothing was
+// deleted or the cluster itself is already gone.
+func cleanupManagedRoles(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, plan deletionPlan) error {
+	if len(plan.deleted) == 0 {
+		return nil
+	}
+	cluster := &enterprisev4.PostgresCluster{}
+	if err := c.Get(ctx, types.NamespacedName{Name: postgresDB.Spec.ClusterRef.Name, Namespace: postgresDB.Namespace}, cluster); err != nil {
+		if !errors.IsNotFound(err) {
+			return fmt.Errorf("getting PostgresCluster for role cleanup: %w", err)
+		}
+		log.FromContext(ctx).Info("PostgresCluster already deleted, skipping role cleanup")
+		return nil
+	}
+	return patchManagedRolesOnDeletion(ctx, c, postgresDB, cluster, plan.retained)
+}
+
+// orphanCNPGDatabases strips this CR's owner reference from each retained CNPG
+// Database and marks it with annotationRetainedFrom for later re-adoption.
+func orphanCNPGDatabases(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, databases []enterprisev4.DatabaseDefinition) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range databases {
+		name := cnpgDatabaseName(postgresDB.Name, dbSpec.Name)
+		db := &cnpgv1.Database{}
+		if err := c.Get(ctx, types.NamespacedName{Name: name, Namespace: postgresDB.Namespace}, db); err != nil {
+			if errors.IsNotFound(err) {
+				continue
+			}
+			return fmt.Errorf("getting CNPG Database %s for orphaning: %w", name, err)
+		}
+		// Already orphaned by a previous (partially failed) cleanup pass.
+		if db.Annotations[annotationRetainedFrom] == postgresDB.Name {
+			continue
+		}
+		stripOwnerReference(db, postgresDB.UID)
+		if db.Annotations == nil {
+			db.Annotations = make(map[string]string)
+		}
+		db.Annotations[annotationRetainedFrom] = postgresDB.Name
+		if err := c.Update(ctx, db); err != nil {
+			return fmt.Errorf("orphaning CNPG Database %s: %w", name, err)
+		}
+		logger.Info("Orphaned CNPG Database CR", "name", name)
+	}
+	return nil
+}
+
+// orphanConfigMaps is the ConfigMap analogue of orphanCNPGDatabases.
+func orphanConfigMaps(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, databases []enterprisev4.DatabaseDefinition) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range databases {
+		name := configMapName(postgresDB.Name, dbSpec.Name)
+		cm := &corev1.ConfigMap{}
+		if err := c.Get(ctx, types.NamespacedName{Name: name,
 Namespace: postgresDB.Namespace}, cm); err != nil {
+			if errors.IsNotFound(err) {
+				continue
+			}
+			return fmt.Errorf("getting ConfigMap %s for orphaning: %w", name, err)
+		}
+		// Already orphaned by a previous cleanup attempt.
+		if cm.Annotations[annotationRetainedFrom] == postgresDB.Name {
+			continue
+		}
+		stripOwnerReference(cm, postgresDB.UID)
+		if cm.Annotations == nil {
+			cm.Annotations = make(map[string]string)
+		}
+		cm.Annotations[annotationRetainedFrom] = postgresDB.Name
+		if err := c.Update(ctx, cm); err != nil {
+			return fmt.Errorf("orphaning ConfigMap %s: %w", name, err)
+		}
+		logger.Info("Orphaned ConfigMap", "name", name)
+	}
+	return nil
+}
+
+// orphanSecrets detaches both the admin and rw password Secrets of each
+// retained database, marking them for later re-adoption.
+func orphanSecrets(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, databases []enterprisev4.DatabaseDefinition) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range databases {
+		for _, role := range []string{secretRoleAdmin, secretRoleRW} {
+			name := roleSecretName(postgresDB.Name, dbSpec.Name, role)
+			secret := &corev1.Secret{}
+			if err := c.Get(ctx, types.NamespacedName{Name: name, Namespace: postgresDB.Namespace}, secret); err != nil {
+				if errors.IsNotFound(err) {
+					continue
+				}
+				return fmt.Errorf("getting Secret %s for orphaning: %w", name, err)
+			}
+			if secret.Annotations[annotationRetainedFrom] == postgresDB.Name {
+				continue
+			}
+			stripOwnerReference(secret, postgresDB.UID)
+			if secret.Annotations == nil {
+				secret.Annotations = make(map[string]string)
+			}
+			secret.Annotations[annotationRetainedFrom] = postgresDB.Name
+			if err := c.Update(ctx, secret); err != nil {
+				return fmt.Errorf("orphaning Secret %s: %w", name, err)
+			}
+			logger.Info("Orphaned Secret", "name", name)
+		}
+	}
+	return nil
+}
+
+// deleteCNPGDatabases removes the CNPG Database CR of each database; NotFound
+// is treated as already done (idempotent cleanup).
+func deleteCNPGDatabases(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, databases []enterprisev4.DatabaseDefinition) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range databases {
+		name := cnpgDatabaseName(postgresDB.Name, dbSpec.Name)
+		db := &cnpgv1.Database{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: postgresDB.Namespace}}
+		if err := c.Delete(ctx, db); err != nil {
+			if errors.IsNotFound(err) {
+				continue
+			}
+			return fmt.Errorf("deleting CNPG Database %s: %w", name, err)
+		}
+		logger.Info("Deleted CNPG Database CR", "name", name)
+	}
+	return nil
+}
+
+// deleteConfigMaps removes the connection ConfigMap of each database (idempotent).
+func deleteConfigMaps(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, databases []enterprisev4.DatabaseDefinition) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range databases {
+		name := configMapName(postgresDB.Name, dbSpec.Name)
+		cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: postgresDB.Namespace}}
+		if err := c.Delete(ctx, cm); err != nil {
+			if errors.IsNotFound(err) {
+				continue
+			}
+			return fmt.Errorf("deleting ConfigMap %s: %w", name, err)
+		}
+		logger.Info("Deleted ConfigMap", "name", name)
+	}
+	return nil
+}
+
+// deleteSecrets removes both role password Secrets of each database (idempotent).
+func deleteSecrets(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, databases []enterprisev4.DatabaseDefinition) error {
+	logger := log.FromContext(ctx)
+	for _, dbSpec := range databases {
+		for _, role := range []string{secretRoleAdmin, secretRoleRW} {
+			name := roleSecretName(postgresDB.Name, dbSpec.Name, role)
+			secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: postgresDB.Namespace}}
+			if err := c.Delete(ctx, secret); err != nil {
+				if errors.IsNotFound(err) {
+					continue
+				}
+				return fmt.Errorf("deleting Secret %s: %w", name, err)
+			}
+			logger.Info("Deleted Secret", "name", name)
+		}
+	}
+	return nil
+}
+
+// buildManagedRoles produces the admin and rw ManagedRole entries (with their
+// password secret references) for every database definition.
+func buildManagedRoles(postgresDBName string, databases []enterprisev4.DatabaseDefinition) []enterprisev4.ManagedRole {
+	roles := make([]enterprisev4.ManagedRole, 0, len(databases)*2)
+	for _, dbSpec := range databases {
+		roles = append(roles,
+			enterprisev4.ManagedRole{
+				Name:   adminRoleName(dbSpec.Name),
+				Exists: true,
+				PasswordSecretRef:
 &corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: roleSecretName(postgresDBName, dbSpec.Name, secretRoleAdmin)},
+					Key: secretKeyPassword},
+			},
+			enterprisev4.ManagedRole{
+				Name:   rwRoleName(dbSpec.Name),
+				Exists: true,
+				PasswordSecretRef: &corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: roleSecretName(postgresDBName, dbSpec.Name, secretRoleRW)},
+					Key: secretKeyPassword},
+			},
+		)
+	}
+	return roles
+}
+
+// buildManagedRolesPatch builds the minimal unstructured object used as a
+// server-side-apply patch: identity plus only spec.managedRoles, so this field
+// manager claims nothing else on the cluster.
+func buildManagedRolesPatch(cluster *enterprisev4.PostgresCluster, roles []enterprisev4.ManagedRole) *unstructured.Unstructured {
+	return &unstructured.Unstructured{
+		Object: map[string]any{
+			"apiVersion": cluster.APIVersion,
+			"kind":       cluster.Kind,
+			"metadata":   map[string]any{"name": cluster.Name, "namespace": cluster.Namespace},
+			"spec":       map[string]any{"managedRoles": roles},
+		},
+	}
+}
+
+// patchManagedRolesOnDeletion re-applies only the retained roles under this
+// CR's field manager; roles omitted from the apply are released.
+func patchManagedRolesOnDeletion(ctx context.Context, c client.Client, postgresDB *enterprisev4.PostgresDatabase, cluster *enterprisev4.PostgresCluster, retained []enterprisev4.DatabaseDefinition) error {
+	roles := buildManagedRoles(postgresDB.Name, retained)
+	rolePatch := buildManagedRolesPatch(cluster, roles)
+	if err := c.Patch(ctx, rolePatch, client.Apply, client.FieldOwner(fieldManagerName(postgresDB.Name))); err != nil {
+		return fmt.Errorf("patching managed roles on deletion: %w", err)
+	}
+	log.FromContext(ctx).Info("Patched managed roles on deletion", "postgresDatabase", postgresDB.Name, "retainedRoles", len(roles))
+	return nil
+}
+
+// stripOwnerReference removes any owner reference with the given UID, leaving
+// all other owners intact.
+func stripOwnerReference(obj metav1.Object, ownerUID types.UID) {
+	refs := obj.GetOwnerReferences()
+	filtered := make([]metav1.OwnerReference, 0, len(refs))
+	for _, ref := range refs {
+		if ref.UID != ownerUID {
+			filtered = append(filtered, ref)
+		}
+	}
+	obj.SetOwnerReferences(filtered)
+}
+
+// adoptResource reverses orphaning: drops the retained-from annotation, sets
+// this CR as controller owner, and persists the object.
+func adoptResource(ctx context.Context, c client.Client, scheme *runtime.Scheme, postgresDB *enterprisev4.PostgresDatabase, obj client.Object) error {
+	annotations := obj.GetAnnotations()
+	delete(annotations, annotationRetainedFrom)
+	obj.SetAnnotations(annotations)
+	if err := controllerutil.SetControllerReference(postgresDB, obj, scheme); err != nil {
+		return err
+	}
+	return c.Update(ctx, obj)
+}
+
+// reconcileUserSecrets ensures an admin and an rw password Secret exist for
+// every database in the spec.
+func reconcileUserSecrets(ctx context.Context, c client.Client, scheme *runtime.Scheme, postgresDB *enterprisev4.PostgresDatabase) error {
+	for _, dbSpec := range postgresDB.Spec.Databases {
+		if err := ensureSecret(ctx, c, scheme, postgresDB, adminRoleName(dbSpec.Name), roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleAdmin)); err != nil {
+			return err
+		}
+		if err := ensureSecret(ctx, c, scheme, postgresDB, rwRoleName(dbSpec.Name), roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleRW)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ensureSecret creates the Secret if missing, re-adopts it if it was orphaned
+// by a same-named predecessor CR, and otherwise leaves it untouched.
+func ensureSecret(ctx context.Context, c client.Client, scheme *runtime.Scheme, postgresDB *enterprisev4.PostgresDatabase, roleName, secretName string) error {
+	secret, err := getSecret(ctx, c, postgresDB.Namespace, secretName)
+	if err != nil {
+		return err
+	}
+	logger := log.FromContext(ctx)
+	switch {
+	case secret == nil:
+		logger.Info("Creating missing user secret", "name", secretName)
+		return createUserSecret(ctx, c, scheme, postgresDB, roleName, secretName)
+	case secret.Annotations[annotationRetainedFrom] == postgresDB.Name:
+		logger.Info("Re-adopting orphaned secret", "name", secretName)
+		return adoptResource(ctx, c, scheme, postgresDB, secret)
+	}
+	return nil
+}
+
+// getSecret fetches a Secret; (nil, nil) signals not-found so callers need not
+// test errors.IsNotFound themselves.
+func getSecret(ctx context.Context, c client.Client, namespace, name string) (*corev1.Secret, error) {
+	secret := &corev1.Secret{}
+	err := c.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, secret)
+	if errors.IsNotFound(err) {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	return secret, nil
+}
+
+// createUserSecret generates a password and creates the Secret owned by this
+// CR; losing a create race (AlreadyExists) is treated as success.
+func createUserSecret(ctx context.Context, c client.Client, scheme *runtime.Scheme, postgresDB *enterprisev4.PostgresDatabase, roleName, secretName string) error {
+	pw, err
:= generatePassword() + if err != nil { + return err + } + secret := buildPasswordSecret(postgresDB, secretName, roleName, pw) + if err := controllerutil.SetControllerReference(postgresDB, secret, scheme); err != nil { + return fmt.Errorf("setting owner reference on Secret %s: %w", secretName, err) + } + if err := c.Create(ctx, secret); err != nil { + if errors.IsAlreadyExists(err) { + return nil + } + return err + } + return nil +} + +func buildPasswordSecret(postgresDB *enterprisev4.PostgresDatabase, secretName, roleName, pw string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: postgresDB.Namespace, + Labels: map[string]string{labelManagedBy: "splunk-operator", labelCNPGReload: "true"}, + }, + Data: map[string][]byte{"username": []byte(roleName), secretKeyPassword: []byte(pw)}, + } +} + +func buildCNPGDatabaseSpec(clusterName string, dbSpec enterprisev4.DatabaseDefinition) cnpgv1.DatabaseSpec { + reclaimPolicy := cnpgv1.DatabaseReclaimDelete + if dbSpec.DeletionPolicy == deletionPolicyRetain { + reclaimPolicy = cnpgv1.DatabaseReclaimRetain + } + return cnpgv1.DatabaseSpec{ + Name: dbSpec.Name, + Owner: adminRoleName(dbSpec.Name), + ClusterRef: corev1.LocalObjectReference{Name: clusterName}, + ReclaimPolicy: reclaimPolicy, + } +} + +func reconcileRoleConfigMaps(ctx context.Context, c client.Client, scheme *runtime.Scheme, postgresDB *enterprisev4.PostgresDatabase, endpoints clusterEndpoints) error { + logger := log.FromContext(ctx) + for _, dbSpec := range postgresDB.Spec.Databases { + cmName := configMapName(postgresDB.Name, dbSpec.Name) + cm := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Namespace: postgresDB.Namespace, + Labels: map[string]string{labelManagedBy: "splunk-operator"}, + }, + } + _, err := controllerutil.CreateOrUpdate(ctx, c, cm, func() error { + cm.Data = buildDatabaseConfigMapBody(dbSpec.Name, endpoints) + reAdopting := cm.Annotations[annotationRetainedFrom] 
== postgresDB.Name + if reAdopting { + logger.Info("Re-adopting orphaned ConfigMap", "name", cmName) + delete(cm.Annotations, annotationRetainedFrom) + } + if cm.CreationTimestamp.IsZero() || reAdopting { + return controllerutil.SetControllerReference(postgresDB, cm, scheme) + } + return nil + }) + if err != nil { + return fmt.Errorf("reconciling ConfigMap %s: %w", cmName, err) + } + } + return nil +} + +func buildDatabaseConfigMapBody(dbName string, endpoints clusterEndpoints) map[string]string { + data := map[string]string{ + "dbname": dbName, + "port": postgresPort, + "rw-host": endpoints.RWHost, + "ro-host": endpoints.ROHost, + "admin-user": adminRoleName(dbName), + "rw-user": rwRoleName(dbName), + } + if endpoints.PoolerRWHost != "" { + data["pooler-rw-host"] = endpoints.PoolerRWHost + } + if endpoints.PoolerROHost != "" { + data["pooler-ro-host"] = endpoints.PoolerROHost + } + return data +} + +func resolveClusterEndpoints(cluster *enterprisev4.PostgresCluster, cnpgCluster *cnpgv1.Cluster, namespace string) clusterEndpoints { + // FQDN so consumers in other namespaces can resolve without extra config. 
+ endpoints := clusterEndpoints{ + RWHost: fmt.Sprintf("%s.%s.svc.cluster.local", cnpgCluster.Status.WriteService, namespace), + ROHost: fmt.Sprintf("%s.%s.svc.cluster.local", cnpgCluster.Status.ReadService, namespace), + } + if cluster.Status.ConnectionPoolerStatus != nil && cluster.Status.ConnectionPoolerStatus.Enabled { + endpoints.PoolerRWHost = fmt.Sprintf("%s-pooler-%s.%s.svc.cluster.local", cnpgCluster.Name, readWriteEndpoint, namespace) + endpoints.PoolerROHost = fmt.Sprintf("%s-pooler-%s.%s.svc.cluster.local", cnpgCluster.Name, readOnlyEndpoint, namespace) + } + return endpoints +} + +func populateDatabaseStatus(postgresDB *enterprisev4.PostgresDatabase) []enterprisev4.DatabaseInfo { + databases := make([]enterprisev4.DatabaseInfo, 0, len(postgresDB.Spec.Databases)) + for _, dbSpec := range postgresDB.Spec.Databases { + databases = append(databases, enterprisev4.DatabaseInfo{ + Name: dbSpec.Name, + Ready: true, + AdminUserSecretRef: &corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleAdmin)}, Key: secretKeyPassword}, + RWUserSecretRef: &corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: roleSecretName(postgresDB.Name, dbSpec.Name, secretRoleRW)}, Key: secretKeyPassword}, + ConfigMapRef: &corev1.LocalObjectReference{Name: configMapName(postgresDB.Name, dbSpec.Name)}, + }) + } + return databases +} + +func hasNewDatabases(postgresDB *enterprisev4.PostgresDatabase) bool { + existing := make(map[string]bool, len(postgresDB.Status.Databases)) + for _, dbInfo := range postgresDB.Status.Databases { + existing[dbInfo.Name] = true + } + for _, dbSpec := range postgresDB.Spec.Databases { + if !existing[dbSpec.Name] { + return true + } + } + return false +} + +// Naming helpers — single source of truth shared by creation and status wiring. 
+func fieldManagerName(postgresDBName string) string { return fieldManagerPrefix + postgresDBName } +func adminRoleName(dbName string) string { return dbName + "_admin" } +func rwRoleName(dbName string) string { return dbName + "_rw" } +func cnpgDatabaseName(postgresDBName, dbName string) string { + return fmt.Sprintf("%s-%s", postgresDBName, dbName) +} +func roleSecretName(postgresDBName, dbName, role string) string { + return fmt.Sprintf("%s-%s-%s", postgresDBName, dbName, role) +} +func configMapName(postgresDBName, dbName string) string { + return fmt.Sprintf("%s-%s-config", postgresDBName, dbName) +} + +// generatePassword uses crypto/rand (via sethvargo/go-password) — predictable passwords +// are unacceptable for credentials that protect live database access. +func generatePassword() (string, error) { + return password.Generate(passwordLength, passwordDigits, passwordSymbols, false, true) +} diff --git a/pkg/postgresql/database/core/database_unit_test.go b/pkg/postgresql/database/core/database_unit_test.go new file mode 100644 index 000000000..0bde24a16 --- /dev/null +++ b/pkg/postgresql/database/core/database_unit_test.go @@ -0,0 +1,1641 @@ +package core + +// The following functions are intentionally not tested directly here. 
// Their business logic is covered by narrower helper tests where practical,
// and the remaining behavior is mostly controller-runtime orchestration:
// - PostgresDatabaseService
// - patchManagedRoles
// - reconcileCNPGDatabases
// - handleDeletion
// - orphanRetainedResources
// - deleteRemovedResources
// - cleanupManagedRoles

import (
	"context"
	"encoding/json"
	"errors"
	"testing"
	"unicode"

	cnpgv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
	enterprisev4 "github.com/splunk/splunk-operator/api/v4"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/client/interceptor"
)

// managedRolesFieldsRaw is a helper to construct the raw managed fields JSON for testing parseRoleNames and related functions.
// Each key becomes an entry under f:spec.f:managedRoles, mirroring the
// server-side-apply FieldsV1 layout ("k:<json>" keys for list members).
func managedRolesFieldsRaw(t *testing.T, keys ...string) []byte {
	t.Helper()

	managedRoles := make(map[string]any, len(keys))
	for _, key := range keys {
		managedRoles[key] = map[string]any{}
	}

	raw, err := json.Marshal(map[string]any{
		"f:spec": map[string]any{
			"f:managedRoles": managedRoles,
		},
	})
	require.NoError(t, err)

	return raw
}

// stubDBRepo is a recording fake for the DBRepo dependency used by
// reconcileRWRolePrivileges tests.
type stubDBRepo struct {
	execErr error // returned by every ExecGrants call
	calls   []string
}

// ExecGrants is a stub implementation of the DBRepo interface that records calls and returns a predefined error.
func (r *stubDBRepo) ExecGrants(_ context.Context, dbName string) error {
	r.calls = append(r.calls, dbName)
	return r.execErr
}

// boolPtr is a helper to get a pointer to a bool value, used for testing conditions with pointer fields.
func boolPtr(v bool) *bool {
	return &v
}

// strPtr is a helper to get a pointer to a string value, used for testing pointer string fields.
func strPtr(s string) *string {
	return &s
}

// databaseNames extracts the Name of each DatabaseDefinition, preserving order.
func databaseNames(defs []enterprisev4.DatabaseDefinition) []string {
	names := make([]string, 0, len(defs))
	for _, def := range defs {
		names = append(names, def.Name)
	}
	return names
}

// assertGeneratedPassword checks a generated password has the expected total
// length and digit count, and contains only letters and digits (no symbols).
func assertGeneratedPassword(t *testing.T, got string, wantLength, wantDigits int) {
	t.Helper()

	digitCount := 0
	for _, r := range got {
		if unicode.IsDigit(r) {
			digitCount++
			continue
		}

		assert.Truef(t, unicode.IsLetter(r), "password contains unsupported rune %q", r)
	}

	assert.Len(t, got, wantLength)
	assert.Equal(t, wantDigits, digitCount)
}

// testScheme constructs a runtime.Scheme with the necessary API types registered for testing.
func testScheme(t *testing.T) *runtime.Scheme {
	t.Helper()

	scheme := runtime.NewScheme()
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(enterprisev4.AddToScheme(scheme))
	utilruntime.Must(cnpgv1.AddToScheme(scheme))

	return scheme
}

// testClient constructs a fake client with the given scheme and initial objects for testing.
// PostgresDatabase gets a status subresource so status updates behave like the real API.
func testClient(t *testing.T, scheme *runtime.Scheme, objs ...client.Object) client.Client {
	t.Helper()

	builder := fake.NewClientBuilder().
		WithScheme(scheme).
		WithStatusSubresource(&enterprisev4.PostgresDatabase{}).
		WithObjects(objs...)

	return builder.Build()
}

// TestGetDesiredUsers verifies admin and rw users are derived per database, in spec order.
func TestGetDesiredUsers(t *testing.T) {
	postgresDB := &enterprisev4.PostgresDatabase{
		Spec: enterprisev4.PostgresDatabaseSpec{
			Databases: []enterprisev4.DatabaseDefinition{
				{Name: "main_db"},
				{Name: "secondary_db"},
			},
		},
	}
	want := []string{
		"main_db_admin",
		"main_db_rw",
		"secondary_db_admin",
		"secondary_db_rw",
	}

	got := getDesiredUsers(postgresDB)

	assert.Equal(t, want, got)
}

// TestGetUsersInClusterSpec verifies role names are read straight from the cluster spec.
func TestGetUsersInClusterSpec(t *testing.T) {
	cluster := &enterprisev4.PostgresCluster{
		Spec: enterprisev4.PostgresClusterSpec{
			ManagedRoles: []enterprisev4.ManagedRole{
				{Name: "main_db_admin"},
				{Name: "main_db_rw"},
			},
		},
	}
	want := []string{"main_db_admin", "main_db_rw"}

	got := getUsersInClusterSpec(cluster)

	assert.Equal(t, want, got)
}

// TestParseRoleNames covers extraction of role names from FieldsV1 raw JSON,
// including malformed JSON and missing sections.
func TestParseRoleNames(t *testing.T) {
	validKey, err := json.Marshal(map[string]string{"name": "main_db_admin"})
	require.NoError(t, err)
	ignoredKey, err := json.Marshal(map[string]string{"other": "value"})
	require.NoError(t, err)

	tests := []struct {
		name string
		raw  []byte
		want []string
	}{
		{
			name: "extracts role names from managed roles fields",
			raw: managedRolesFieldsRaw(
				t,
				"k:"+string(validKey),
				"k:"+string(ignoredKey),
				"plain-key",
			),
			want: []string{"main_db_admin"},
		},
		{
			name: "returns nil on invalid json",
			raw:  []byte(`{"f:spec"`),
			want: nil,
		},
		{
			name: "returns empty when managed roles missing",
			raw:  []byte(`{"f:spec":{}}`),
			want: nil,
		},
		{
			name: "returns empty when spec field is missing entirely",
			raw:  []byte(`{"f:metadata":{}}`),
			want: nil,
		},
	}

	for _, tst := range tests {

		t.Run(tst.name, func(t *testing.T) {
			got := parseRoleNames(tst.raw)

			assert.ElementsMatch(t, tst.want, got)
		})
	}
}

// TestManagedRoleOwners verifies later managedFields entries win ownership of
// a role over earlier ones, and entries without FieldsV1 are skipped.
func TestManagedRoleOwners(t *testing.T) {
	roleKey, err := json.Marshal(map[string]string{"name": "main_db_admin"})
	require.NoError(t, err)
	secondRoleKey, err := json.Marshal(map[string]string{"name": "main_db_rw"})
	require.NoError(t, err)

	managedFields := []metav1.ManagedFieldsEntry{
		{Manager: "ignored"},
		{
			Manager: "postgresdatabase-other",
			FieldsV1: &metav1.FieldsV1{
				Raw: managedRolesFieldsRaw(
					t,
					"k:"+string(roleKey),
					"k:"+string(secondRoleKey),
				),
			},
		},
		{
			Manager: "postgresdatabase-newer",
			FieldsV1: &metav1.FieldsV1{
				Raw: managedRolesFieldsRaw(t, "k:"+string(roleKey)),
			},
		},
	}
	want := map[string]string{
		"main_db_admin": "postgresdatabase-newer",
		"main_db_rw":    "postgresdatabase-other",
	}

	got := managedRoleOwners(managedFields)

	assert.Equal(t, want, got)
}

// TestGetRoleConflicts verifies only roles desired by this PostgresDatabase but
// owned by a different field manager are reported; same-owner and unrelated
// roles are ignored.
func TestGetRoleConflicts(t *testing.T) {
	roleKey, err := json.Marshal(map[string]string{"name": "main_db_admin"})
	require.NoError(t, err)
	sameOwnerKey, err := json.Marshal(map[string]string{"name": "main_db_rw"})
	require.NoError(t, err)
	unrelatedKey, err := json.Marshal(map[string]string{"name": "audit_admin"})
	require.NoError(t, err)

	postgresDB := &enterprisev4.PostgresDatabase{
		ObjectMeta: metav1.ObjectMeta{Name: "primary"},
		Spec: enterprisev4.PostgresDatabaseSpec{
			Databases: []enterprisev4.DatabaseDefinition{{Name: "main_db"}},
		},
	}
	cluster := &enterprisev4.PostgresCluster{
		ObjectMeta: metav1.ObjectMeta{
			ManagedFields: []metav1.ManagedFieldsEntry{
				{
					Manager: "postgresdatabase-legacy",
					FieldsV1: &metav1.FieldsV1{
						Raw: managedRolesFieldsRaw(
							t,
							"k:"+string(roleKey),
							"k:"+string(unrelatedKey),
						),
					},
				},
				{
					Manager: fieldManagerName(postgresDB.Name),
					FieldsV1: &metav1.FieldsV1{
						Raw: managedRolesFieldsRaw(t, "k:"+string(sameOwnerKey)),
					},
				},
			},
		},
	}
	want := []string{"main_db_admin (owned by postgresdatabase-legacy)"}

	got := getRoleConflicts(postgresDB, cluster)

	assert.ElementsMatch(t, want, got)
}

// TestVerifyRolesReady covers the CNPG managed-roles status matrix:
// cannot-reconcile errors, missing roles, pending roles, and the all-ready case.
func TestVerifyRolesReady(t *testing.T) {
	tests := []struct {
		name          string
		expectedUsers []string
		cluster       *cnpgv1.Cluster
		wantNotReady  []string
		wantErr       string
	}{
		{
			name:          "returns error when a role cannot reconcile",
			expectedUsers: []string{"main_db_admin", "main_db_rw"},
			cluster: &cnpgv1.Cluster{
				Status: cnpgv1.ClusterStatus{
					ManagedRolesStatus: cnpgv1.ManagedRoles{
						CannotReconcile: map[string][]string{
							"main_db_rw": {"reserved role"},
						},
					},
				},
			},
			wantErr: "user main_db_rw reconciliation failed: [reserved role]",
		},
		{
			name:          "returns missing roles that are not reconciled yet",
			expectedUsers: []string{"main_db_admin", "main_db_rw", "analytics_admin"},
			cluster: &cnpgv1.Cluster{
				Status: cnpgv1.ClusterStatus{
					ManagedRolesStatus: cnpgv1.ManagedRoles{
						ByStatus: map[cnpgv1.RoleStatus][]string{
							cnpgv1.RoleStatusReconciled: {"main_db_admin", "analytics_admin"},
						},
					},
				},
			},
			wantNotReady: []string{"main_db_rw"},
		},
		{
			name:          "returns pending reconciliation roles as not ready",
			expectedUsers: []string{"main_db_admin", "main_db_rw"},
			cluster: &cnpgv1.Cluster{
				Status: cnpgv1.ClusterStatus{
					ManagedRolesStatus: cnpgv1.ManagedRoles{
						ByStatus: map[cnpgv1.RoleStatus][]string{
							cnpgv1.RoleStatusReconciled:            {"main_db_admin"},
							cnpgv1.RoleStatusPendingReconciliation: {"main_db_rw"},
						},
					},
				},
			},
			wantNotReady: []string{"main_db_rw"},
		},
		{
			name:          "returns empty when all roles are reconciled",
			expectedUsers: []string{"main_db_admin"},
			cluster: &cnpgv1.Cluster{
				Status: cnpgv1.ClusterStatus{
					ManagedRolesStatus: cnpgv1.ManagedRoles{
						ByStatus: map[cnpgv1.RoleStatus][]string{
							cnpgv1.RoleStatusReconciled: {"main_db_admin"},
						},
					},
				},
			},
			wantNotReady: nil,
		},
	}

	for _, tst := range tests {

		t.Run(tst.name, func(t *testing.T) {
			gotNotReady, err := verifyRolesReady(context.Background(), tst.expectedUsers, tst.cluster)
			if tst.wantErr != "" {
				require.Error(t, err)
				assert.Equal(t, tst.wantErr, err.Error())
				return
			}
			require.NoError(t, err)
			assert.Equal(t, tst.wantNotReady, gotNotReady)
		})
	}
}

// TestReconcileRWRolePrivileges verifies grants are attempted for every
// database even when earlier ones fail, and that all failures are aggregated
// into the returned error.
// NOTE(review): the stub factory ignores host/password — consider asserting
// they equal the values passed to reconcileRWRolePrivileges.
func TestReconcileRWRolePrivileges(t *testing.T) {
	tests := []struct {
		name            string
		dbNames         []string
		newRepoErrs     map[string]error
		execErrs        map[string]error
		wantRepoCalls   []string
		wantExecCalls   map[string][]string
		wantErrContains []string
	}{
		{
			name:          "returns nil when all databases succeed",
			dbNames:       []string{"payments", "analytics"},
			wantRepoCalls: []string{"payments", "analytics"},
			wantExecCalls: map[string][]string{
				"payments":  {"payments"},
				"analytics": {"analytics"},
			},
		},
		{
			name:          "continues after repo creation and exec errors",
			dbNames:       []string{"payments", "analytics", "audit"},
			newRepoErrs:   map[string]error{"payments": errors.New("connect failed")},
			execErrs:      map[string]error{"analytics": errors.New("grant failed")},
			wantRepoCalls: []string{"payments", "analytics", "audit"},
			wantExecCalls: map[string][]string{
				"analytics": {"analytics"},
				"audit":     {"audit"},
			},
			wantErrContains: []string{
				"database payments: connect failed",
				"database analytics: grant failed",
			},
		},
	}

	for _, tst := range tests {
		t.Run(tst.name, func(t *testing.T) {
			repos := make(map[string]*stubDBRepo, len(tst.dbNames))
			repoCalls := make([]string, 0, len(tst.dbNames))

			for _, dbName := range tst.dbNames {
				repos[dbName] = &stubDBRepo{execErr: tst.execErrs[dbName]}
			}

			newDBRepo := func(_ context.Context, host, dbName, password string) (DBRepo, error) {
				repoCalls = append(repoCalls, dbName)
				if err := tst.newRepoErrs[dbName]; err != nil {
					return nil, err
				}

				return repos[dbName], nil
			}

			err := reconcileRWRolePrivileges(context.Background(), "rw.example.internal", "supersecret", tst.dbNames, newDBRepo)

			assert.Equal(t, tst.wantRepoCalls, repoCalls)
			for dbName, wantCalls := range tst.wantExecCalls {
				assert.Equal(t, wantCalls, repos[dbName].calls)
			}

			if len(tst.wantErrContains) == 0 {
				assert.NoError(t, err)
				return
			}

			require.Error(t, err)
			for _, wantMsg := range tst.wantErrContains {
				assert.ErrorContains(t, err, wantMsg)
			}
		})
	}
}

// TestGetClusterReadyStatus covers nil phase, not-ready phase, ready phase
// without a provisioner ref, and fully ready.
func TestGetClusterReadyStatus(t *testing.T) {
	tests := []struct {
		name       string
		cluster    *enterprisev4.PostgresCluster
		wantStatus clusterReadyStatus
	}{
		{
			name:       "returns not ready when phase is nil",
			cluster:    &enterprisev4.PostgresCluster{},
			wantStatus: ClusterNotReady,
		},
		{
			name: "returns not ready when phase is not ready",
			cluster: &enterprisev4.PostgresCluster{
				Status: enterprisev4.PostgresClusterStatus{
					Phase: strPtr("Provisioning"),
				},
			},
			wantStatus: ClusterNotReady,
		},
		{
			name: "returns no provisioner ref when phase is ready but ref is missing",
			cluster: &enterprisev4.PostgresCluster{
				Status: enterprisev4.PostgresClusterStatus{
					Phase: strPtr(string(ClusterReady)),
				},
			},
			wantStatus: ClusterNoProvisionerRef,
		},
		{
			name: "returns ready when phase and provisioner ref are present",
			cluster: &enterprisev4.PostgresCluster{
				Status: enterprisev4.PostgresClusterStatus{
					Phase:          strPtr(string(ClusterReady)),
					ProvisionerRef: &corev1.ObjectReference{Name: "cnpg-primary", Namespace: "dbs"},
				},
			},
			wantStatus: ClusterReady,
		},
	}

	for _, tst := range tests {
		t.Run(tst.name, func(t *testing.T) {
			assert.Equal(t, tst.wantStatus, getClusterReadyStatus(tst.cluster))
		})
	}
}

// Uses a fake client because fetching the referenced Cluster depends on API reads.
// TestFetchCluster covers: cluster absent (NotFound surfaced), cluster
// present, and an arbitrary client error injected via an interceptor.
// NOTE(review): the wantErr table field is unused by the current cases — the
// error path is exercised only by the standalone interceptor subtest.
func TestFetchCluster(t *testing.T) {
	scheme := testScheme(t)

	tests := []struct {
		name       string
		cluster    *enterprisev4.PostgresCluster
		wantName   string
		wantErr    string
		wantAbsent bool
	}{
		{
			name:       "returns not found when cluster is absent",
			wantAbsent: true,
		},
		{
			name: "returns referenced cluster when present",
			cluster: &enterprisev4.PostgresCluster{
				ObjectMeta: metav1.ObjectMeta{Name: "primary", Namespace: "dbs"},
			},
			wantName: "primary",
		},
	}

	for _, tst := range tests {
		t.Run(tst.name, func(t *testing.T) {
			postgresDB := &enterprisev4.PostgresDatabase{
				ObjectMeta: metav1.ObjectMeta{Name: "db", Namespace: "dbs"},
				Spec: enterprisev4.PostgresDatabaseSpec{
					ClusterRef: corev1.LocalObjectReference{Name: "primary"},
				},
			}

			var objs []client.Object
			if tst.cluster != nil {
				objs = append(objs, tst.cluster)
			}

			c := testClient(t, scheme, objs...)
			cluster, err := fetchCluster(context.Background(), c, postgresDB)

			if tst.wantAbsent {
				require.Error(t, err)
				assert.True(t, apierrors.IsNotFound(err))
				assert.Nil(t, cluster)
				return
			}

			if tst.wantErr != "" {
				require.Error(t, err)
				assert.ErrorContains(t, err, tst.wantErr)
				return
			}

			require.NoError(t, err)
			require.NotNil(t, cluster)
			assert.Equal(t, tst.wantName, cluster.Name)
		})
	}

	t.Run("returns error on client failure", func(t *testing.T) {
		postgresDB := &enterprisev4.PostgresDatabase{
			ObjectMeta: metav1.ObjectMeta{Name: "db", Namespace: "dbs"},
			Spec: enterprisev4.PostgresDatabaseSpec{
				ClusterRef: corev1.LocalObjectReference{Name: "primary"},
			},
		}
		// Interceptor makes every Get fail, simulating an unavailable API server.
		c := fake.NewClientBuilder().
			WithScheme(scheme).
			WithInterceptorFuncs(interceptor.Funcs{
				Get: func(_ context.Context, _ client.WithWatch, _ client.ObjectKey, _ client.Object, _ ...client.GetOption) error {
					return errors.New("api unavailable")
				},
			}).
			Build()

		cluster, err := fetchCluster(context.Background(), c, postgresDB)

		require.Error(t, err)
		assert.Nil(t, cluster)
		assert.ErrorContains(t, err, "api unavailable")
	})
}

// Uses a fake client because the helper mutates status in-memory and persists it through the status subresource.
// NOTE(review): the function under test is persistStatus — consider renaming
// this test to TestPersistStatus for discoverability.
func TestSetStatus(t *testing.T) {
	scheme := testScheme(t)
	existing := &enterprisev4.PostgresDatabase{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "primary",
			Namespace:  "dbs",
			Generation: 7,
		},
	}
	c := testClient(t, scheme, existing)
	postgresDB := &enterprisev4.PostgresDatabase{}
	require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: existing.Name, Namespace: existing.Namespace}, postgresDB))

	err := persistStatus(
		context.Background(),
		c,
		postgresDB,
		clusterReady,
		metav1.ConditionTrue,
		reasonClusterAvailable,
		"Cluster is operational",
		provisioningDBPhase,
	)

	require.NoError(t, err)
	// In-memory object reflects the new phase and condition...
	require.NotNil(t, postgresDB.Status.Phase)
	assert.Equal(t, string(provisioningDBPhase), *postgresDB.Status.Phase)
	require.Len(t, postgresDB.Status.Conditions, 1)
	assert.Equal(t, string(clusterReady), postgresDB.Status.Conditions[0].Type)
	assert.Equal(t, metav1.ConditionTrue, postgresDB.Status.Conditions[0].Status)
	assert.Equal(t, string(reasonClusterAvailable), postgresDB.Status.Conditions[0].Reason)
	assert.Equal(t, "Cluster is operational", postgresDB.Status.Conditions[0].Message)
	assert.Equal(t, postgresDB.Generation, postgresDB.Status.Conditions[0].ObservedGeneration)

	// ...and the same state was persisted through the status subresource.
	got := &enterprisev4.PostgresDatabase{}
	require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: postgresDB.Name, Namespace: postgresDB.Namespace}, got))
	require.NotNil(t, got.Status.Phase)
	assert.Equal(t, *postgresDB.Status.Phase, *got.Status.Phase)
	require.Len(t, got.Status.Conditions, 1)
	assert.Equal(t, postgresDB.Status.Conditions[0], got.Status.Conditions[0])
}

// Uses a fake client because readiness is determined from CNPG Database objects in the API.
func TestVerifyDatabasesReady(t *testing.T) {
	scheme := testScheme(t)
	postgresDB := &enterprisev4.PostgresDatabase{
		ObjectMeta: metav1.ObjectMeta{Name: "primary", Namespace: "dbs"},
		Spec: enterprisev4.PostgresDatabaseSpec{
			Databases: []enterprisev4.DatabaseDefinition{
				{Name: "payments"},
				{Name: "analytics"},
			},
		},
	}

	tests := []struct {
		name         string
		objects      []client.Object
		wantNotReady []string
		wantErr      string
	}{
		{
			name: "returns empty when all databases are applied",
			objects: []client.Object{
				&cnpgv1.Database{
					ObjectMeta: metav1.ObjectMeta{Name: "primary-payments", Namespace: "dbs"},
					Status:     cnpgv1.DatabaseStatus{Applied: boolPtr(true)},
				},
				&cnpgv1.Database{
					ObjectMeta: metav1.ObjectMeta{Name: "primary-analytics", Namespace: "dbs"},
					Status:     cnpgv1.DatabaseStatus{Applied: boolPtr(true)},
				},
			},
			wantNotReady: nil,
		},
		{
			name: "returns names for databases that are not applied",
			objects: []client.Object{
				&cnpgv1.Database{
					ObjectMeta: metav1.ObjectMeta{Name: "primary-payments", Namespace: "dbs"},
					Status:     cnpgv1.DatabaseStatus{Applied: boolPtr(false)},
				},
				// nil Applied counts as not ready, same as Applied=false.
				&cnpgv1.Database{
					ObjectMeta: metav1.ObjectMeta{Name: "primary-analytics", Namespace: "dbs"},
				},
			},
			wantNotReady: []string{"payments", "analytics"},
		},
		{
			name: "returns error when a database is missing",
			objects: []client.Object{
				&cnpgv1.Database{
					ObjectMeta: metav1.ObjectMeta{Name: "primary-payments", Namespace: "dbs"},
					Status:     cnpgv1.DatabaseStatus{Applied: boolPtr(true)},
				},
			},
			wantErr: "getting CNPG Database primary-analytics",
		},
	}

	for _, tst := range tests {

		t.Run(tst.name, func(t *testing.T) {
			c := testClient(t, scheme, tst.objects...)

			got, err := verifyDatabasesReady(context.Background(), c, postgresDB)

			if tst.wantErr != "" {
				require.Error(t, err)
				assert.ErrorContains(t, err, tst.wantErr)
				return
			}

			require.NoError(t, err)
			assert.Equal(t, tst.wantNotReady, got)
		})
	}
}

// Uses a fake client because the helper wraps Kubernetes get/not-found behavior.
func TestGetSecret(t *testing.T) {
	scheme := testScheme(t)

	t.Run("returns secret when found", func(t *testing.T) {
		existing := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{Name: "db-secret", Namespace: "dbs"},
			Data:       map[string][]byte{secretKeyPassword: []byte("value")},
		}
		c := testClient(t, scheme, existing)

		secret, err := getSecret(context.Background(), c, "dbs", "db-secret")

		require.NoError(t, err)
		require.NotNil(t, secret)
		assert.Equal(t, existing.Name, secret.Name)
		assert.Equal(t, "value", string(secret.Data[secretKeyPassword]))
	})

	t.Run("returns nil nil when secret is absent", func(t *testing.T) {
		c := testClient(t, scheme)

		secret, err := getSecret(context.Background(), c, "dbs", "missing")

		require.NoError(t, err)
		assert.Nil(t, secret)
	})
}

// Uses a fake client because adoption updates object metadata and persists it through the client.
+func TestAdoptResource(t *testing.T) { + scheme := testScheme(t) + postgresDB := &enterprisev4.PostgresDatabase{ + TypeMeta: metav1.TypeMeta{ + APIVersion: enterprisev4.GroupVersion.String(), + Kind: "PostgresDatabase", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "primary", + Namespace: "dbs", + UID: types.UID("postgresdb-uid"), + }, + } + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "primary-payments-config", + Namespace: "dbs", + Annotations: map[string]string{annotationRetainedFrom: "primary", "keep": "true"}, + }, + } + c := testClient(t, scheme, postgresDB, configMap) + + err := adoptResource(context.Background(), c, scheme, postgresDB, configMap) + + require.NoError(t, err) + + updated := &corev1.ConfigMap{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: configMap.Name, Namespace: configMap.Namespace}, updated)) + assert.Equal(t, "true", updated.Annotations["keep"]) + _, exists := updated.Annotations[annotationRetainedFrom] + assert.False(t, exists) + require.Len(t, updated.OwnerReferences, 1) + assert.Equal(t, postgresDB.UID, updated.OwnerReferences[0].UID) +} + +// Uses a fake client because these helpers mutate existing API objects during orphaning. 
+func TestOrphanResourceHelpers(t *testing.T) { + scheme := testScheme(t) + postgresDB := &enterprisev4.PostgresDatabase{ + ObjectMeta: metav1.ObjectMeta{ + Name: "primary", + Namespace: "dbs", + UID: types.UID("postgresdb-uid"), + }, + } + databases := []enterprisev4.DatabaseDefinition{{Name: "payments"}} + + t.Run("orphanCNPGDatabases strips owner and adds retain annotation", func(t *testing.T) { + db := &cnpgv1.Database{ + ObjectMeta: metav1.ObjectMeta{ + Name: "primary-payments", + Namespace: "dbs", + OwnerReferences: []metav1.OwnerReference{ + {UID: postgresDB.UID, Name: postgresDB.Name}, + {UID: types.UID("other"), Name: "other"}, + }, + }, + } + c := testClient(t, scheme, db) + + require.NoError(t, orphanCNPGDatabases(context.Background(), c, postgresDB, databases)) + + updated := &cnpgv1.Database{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: db.Name, Namespace: db.Namespace}, updated)) + assert.Equal(t, postgresDB.Name, updated.Annotations[annotationRetainedFrom]) + require.Len(t, updated.OwnerReferences, 1) + assert.Equal(t, types.UID("other"), updated.OwnerReferences[0].UID) + }) + + t.Run("orphanConfigMaps skips not found", func(t *testing.T) { + c := testClient(t, scheme) + require.NoError(t, orphanConfigMaps(context.Background(), c, postgresDB, databases)) + }) + + t.Run("orphanSecrets skips already retained secret", func(t *testing.T) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "primary-payments-admin", + Namespace: "dbs", + Annotations: map[string]string{annotationRetainedFrom: postgresDB.Name}, + }, + } + c := testClient(t, scheme, secret) + + require.NoError(t, orphanSecrets(context.Background(), c, postgresDB, databases)) + + updated := &corev1.Secret{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}, updated)) + assert.Equal(t, postgresDB.Name, updated.Annotations[annotationRetainedFrom]) + assert.Empty(t, 
updated.OwnerReferences) + assert.Equal(t, secret, updated) + }) +} + +// Uses a fake client because these helpers delete Kubernetes resources and must verify API state. +func TestDeleteResourceHelpers(t *testing.T) { + scheme := testScheme(t) + postgresDB := &enterprisev4.PostgresDatabase{ + ObjectMeta: metav1.ObjectMeta{Name: "primary", Namespace: "dbs"}, + } + databases := []enterprisev4.DatabaseDefinition{{Name: "payments"}} + + t.Run("deleteCNPGDatabases removes existing object", func(t *testing.T) { + db := &cnpgv1.Database{ObjectMeta: metav1.ObjectMeta{Name: "primary-payments", Namespace: "dbs"}} + c := testClient(t, scheme, db) + require.NoError(t, deleteCNPGDatabases(context.Background(), c, postgresDB, databases)) + }) + + t.Run("deleteConfigMaps ignores missing objects", func(t *testing.T) { + c := testClient(t, scheme) + require.NoError(t, deleteConfigMaps(context.Background(), c, postgresDB, databases)) + }) + + t.Run("deleteSecrets deletes admin and rw secrets", func(t *testing.T) { + admin := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "primary-payments-admin", Namespace: "dbs"}} + rw := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "primary-payments-rw", Namespace: "dbs"}} + c := testClient(t, scheme, admin, rw) + require.NoError(t, deleteSecrets(context.Background(), c, postgresDB, databases)) + }) +} + +func TestGeneratePassword(t *testing.T) { + wantLength := passwordLength + wantDigits := passwordDigits + + got, err := generatePassword() + + require.NoError(t, err) + assertGeneratedPassword(t, got, wantLength, wantDigits) +} + +// Uses a fake client because the helper creates Secret objects and persists owner references through the Kubernetes API. 
+func TestCreateUserSecret(t *testing.T) { + scheme := testScheme(t) + postgresDB := &enterprisev4.PostgresDatabase{ + TypeMeta: metav1.TypeMeta{ + APIVersion: enterprisev4.GroupVersion.String(), + Kind: "PostgresDatabase", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "primary", + Namespace: "dbs", + UID: types.UID("postgresdb-uid"), + }, + } + + t.Run("creates secret with generated credentials", func(t *testing.T) { + roleName := "payments_admin" + secretName := "primary-payments-admin" + wantManagedBy := "splunk-operator" + wantReload := "true" + wantUsername := roleName + wantOwnerUID := postgresDB.UID + wantPasswordLength := passwordLength + wantPasswordDigits := passwordDigits + c := testClient(t, scheme) + + err := createUserSecret(context.Background(), c, scheme, postgresDB, roleName, secretName) + + require.NoError(t, err) + + got := &corev1.Secret{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: secretName, Namespace: postgresDB.Namespace}, got)) + assert.Equal(t, secretName, got.Name) + assert.Equal(t, postgresDB.Namespace, got.Namespace) + assert.Equal(t, wantManagedBy, got.Labels[labelManagedBy]) + assert.Equal(t, wantReload, got.Labels[labelCNPGReload]) + assert.Equal(t, wantUsername, string(got.Data["username"])) + assertGeneratedPassword(t, string(got.Data[secretKeyPassword]), wantPasswordLength, wantPasswordDigits) + require.Len(t, got.OwnerReferences, 1) + assert.Equal(t, wantOwnerUID, got.OwnerReferences[0].UID) + }) + + t.Run("returns nil when secret already exists", func(t *testing.T) { + roleName := "payments_admin" + secretName := "primary-payments-admin" + wantUsername := roleName + wantPassword := "existing-password" + existing := buildPasswordSecret(postgresDB, secretName, wantUsername, wantPassword) + c := testClient(t, scheme, existing) + + err := createUserSecret(context.Background(), c, scheme, postgresDB, roleName, secretName) + + require.NoError(t, err) + + got := &corev1.Secret{} + require.NoError(t, 
c.Get(context.Background(), types.NamespacedName{Name: secretName, Namespace: postgresDB.Namespace}, got)) + assert.Equal(t, wantUsername, string(got.Data["username"])) + assert.Equal(t, wantPassword, string(got.Data[secretKeyPassword])) + assert.Empty(t, got.OwnerReferences) + }) +} + +// Uses a fake client because the helper decides between get/create/adopt behavior based on Secret state in the API. +func TestEnsureSecret(t *testing.T) { + scheme := testScheme(t) + postgresDB := &enterprisev4.PostgresDatabase{ + TypeMeta: metav1.TypeMeta{ + APIVersion: enterprisev4.GroupVersion.String(), + Kind: "PostgresDatabase", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "primary", + Namespace: "dbs", + UID: types.UID("postgresdb-uid"), + }, + } + + t.Run("creates missing secret", func(t *testing.T) { + roleName := "payments_admin" + secretName := "primary-payments-admin" + wantManagedBy := "splunk-operator" + wantReload := "true" + wantUsername := roleName + wantOwnerUID := postgresDB.UID + wantPasswordLength := passwordLength + wantPasswordDigits := passwordDigits + c := testClient(t, scheme) + + err := ensureSecret(context.Background(), c, scheme, postgresDB, roleName, secretName) + + require.NoError(t, err) + + got := &corev1.Secret{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: secretName, Namespace: postgresDB.Namespace}, got)) + assert.Equal(t, wantManagedBy, got.Labels[labelManagedBy]) + assert.Equal(t, wantReload, got.Labels[labelCNPGReload]) + assert.Equal(t, wantUsername, string(got.Data["username"])) + assertGeneratedPassword(t, string(got.Data[secretKeyPassword]), wantPasswordLength, wantPasswordDigits) + require.Len(t, got.OwnerReferences, 1) + assert.Equal(t, wantOwnerUID, got.OwnerReferences[0].UID) + }) + + t.Run("re-adopts retained secret", func(t *testing.T) { + roleName := "payments_admin" + secretName := "primary-payments-admin" + wantUsername := roleName + wantPassword := "existing-password" + wantOwnerUID := 
postgresDB.UID + wantKeep := "true" + retained := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: postgresDB.Namespace, + Annotations: map[string]string{ + annotationRetainedFrom: postgresDB.Name, + "keep": wantKeep, + }, + OwnerReferences: []metav1.OwnerReference{ + {UID: types.UID("old-owner"), Name: "old-owner"}, + }, + }, + Data: map[string][]byte{ + "username": []byte(wantUsername), + secretKeyPassword: []byte(wantPassword), + }, + } + c := testClient(t, scheme, retained) + + err := ensureSecret(context.Background(), c, scheme, postgresDB, roleName, secretName) + + require.NoError(t, err) + + got := &corev1.Secret{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: secretName, Namespace: postgresDB.Namespace}, got)) + assert.Equal(t, wantKeep, got.Annotations["keep"]) + _, hasRetainedAnnotation := got.Annotations[annotationRetainedFrom] + assert.False(t, hasRetainedAnnotation) + assert.Equal(t, wantUsername, string(got.Data["username"])) + assert.Equal(t, wantPassword, string(got.Data[secretKeyPassword])) + assert.Contains(t, got.OwnerReferences, metav1.OwnerReference{ + APIVersion: enterprisev4.GroupVersion.String(), + Kind: "PostgresDatabase", + Name: postgresDB.Name, + UID: wantOwnerUID, + Controller: boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }) + }) + + t.Run("does nothing for existing managed secret", func(t *testing.T) { + roleName := "payments_admin" + secretName := "primary-payments-admin" + wantUsername := roleName + wantPassword := "existing-password" + wantKeep := "true" + wantOwnerUID := postgresDB.UID + existing := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: postgresDB.Namespace, + Annotations: map[string]string{ + "keep": wantKeep, + }, + OwnerReferences: []metav1.OwnerReference{ + {UID: wantOwnerUID, Name: postgresDB.Name}, + }, + }, + Data: map[string][]byte{ + "username": []byte(wantUsername), + secretKeyPassword: []byte(wantPassword), 
+ }, + } + c := testClient(t, scheme, existing) + + err := ensureSecret(context.Background(), c, scheme, postgresDB, roleName, secretName) + + require.NoError(t, err) + + got := &corev1.Secret{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: secretName, Namespace: postgresDB.Namespace}, got)) + assert.Equal(t, wantKeep, got.Annotations["keep"]) + assert.Equal(t, wantUsername, string(got.Data["username"])) + assert.Equal(t, wantPassword, string(got.Data[secretKeyPassword])) + require.Len(t, got.OwnerReferences, 1) + assert.Equal(t, wantOwnerUID, got.OwnerReferences[0].UID) + }) +} + +// Uses a fake client because the helper reconciles multiple Secret objects through the Kubernetes API. +func TestReconcileUserSecrets(t *testing.T) { + scheme := testScheme(t) + postgresDB := &enterprisev4.PostgresDatabase{ + TypeMeta: metav1.TypeMeta{ + APIVersion: enterprisev4.GroupVersion.String(), + Kind: "PostgresDatabase", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "primary", + Namespace: "dbs", + UID: types.UID("postgresdb-uid"), + }, + Spec: enterprisev4.PostgresDatabaseSpec{ + Databases: []enterprisev4.DatabaseDefinition{ + {Name: "payments"}, + {Name: "analytics"}, + }, + }, + } + + t.Run("creates secrets for each database role", func(t *testing.T) { + c := testClient(t, scheme) + wantSecrets := []struct { + name string + username string + }{ + {name: "primary-payments-admin", username: "payments_admin"}, + {name: "primary-payments-rw", username: "payments_rw"}, + {name: "primary-analytics-admin", username: "analytics_admin"}, + {name: "primary-analytics-rw", username: "analytics_rw"}, + } + + err := reconcileUserSecrets(context.Background(), c, scheme, postgresDB) + + require.NoError(t, err) + for _, want := range wantSecrets { + got := &corev1.Secret{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: want.name, Namespace: postgresDB.Namespace}, got)) + assert.Equal(t, want.username, string(got.Data["username"])) + 
assertGeneratedPassword(t, string(got.Data[secretKeyPassword]), passwordLength, passwordDigits) + require.Len(t, got.OwnerReferences, 1) + assert.Equal(t, postgresDB.UID, got.OwnerReferences[0].UID) + } + }) + + t.Run("is idempotent when secrets already exist", func(t *testing.T) { + c := testClient(t, scheme) + + require.NoError(t, reconcileUserSecrets(context.Background(), c, scheme, postgresDB)) + + before := &corev1.Secret{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: "primary-payments-admin", Namespace: postgresDB.Namespace}, before)) + beforePassword := append([]byte(nil), before.Data[secretKeyPassword]...) + + err := reconcileUserSecrets(context.Background(), c, scheme, postgresDB) + + require.NoError(t, err) + + after := &corev1.Secret{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: "primary-payments-admin", Namespace: postgresDB.Namespace}, after)) + assert.Equal(t, beforePassword, after.Data[secretKeyPassword]) + require.Len(t, after.OwnerReferences, 1) + assert.Equal(t, postgresDB.UID, after.OwnerReferences[0].UID) + }) +} + +// Uses a fake client because the helper reconciles ConfigMaps through CreateOrUpdate and persists re-adoption metadata. 
+func TestReconcileRoleConfigMaps(t *testing.T) { + scheme := testScheme(t) + endpoints := clusterEndpoints{ + RWHost: "rw.default.svc.cluster.local", + ROHost: "ro.default.svc.cluster.local", + PoolerRWHost: "pooler-rw.default.svc.cluster.local", + PoolerROHost: "pooler-ro.default.svc.cluster.local", + } + + t.Run("creates configmaps for all databases", func(t *testing.T) { + postgresDB := &enterprisev4.PostgresDatabase{ + TypeMeta: metav1.TypeMeta{ + APIVersion: enterprisev4.GroupVersion.String(), + Kind: "PostgresDatabase", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "primary", + Namespace: "dbs", + UID: types.UID("postgresdb-uid"), + }, + Spec: enterprisev4.PostgresDatabaseSpec{ + Databases: []enterprisev4.DatabaseDefinition{ + {Name: "payments"}, + {Name: "analytics"}, + }, + }, + } + wantManagedBy := "splunk-operator" + wantOwnerUID := postgresDB.UID + wantPaymentsName := "primary-payments-config" + wantAnalyticsName := "primary-analytics-config" + wantPaymentsData := buildDatabaseConfigMapBody("payments", endpoints) + wantAnalyticsData := buildDatabaseConfigMapBody("analytics", endpoints) + c := testClient(t, scheme) + + err := reconcileRoleConfigMaps(context.Background(), c, scheme, postgresDB, endpoints) + + require.NoError(t, err) + + gotPayments := &corev1.ConfigMap{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: wantPaymentsName, Namespace: postgresDB.Namespace}, gotPayments)) + assert.Equal(t, wantManagedBy, gotPayments.Labels[labelManagedBy]) + assert.Equal(t, wantPaymentsData, gotPayments.Data) + require.Len(t, gotPayments.OwnerReferences, 1) + assert.Equal(t, wantOwnerUID, gotPayments.OwnerReferences[0].UID) + + gotAnalytics := &corev1.ConfigMap{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: wantAnalyticsName, Namespace: postgresDB.Namespace}, gotAnalytics)) + assert.Equal(t, wantManagedBy, gotAnalytics.Labels[labelManagedBy]) + assert.Equal(t, wantAnalyticsData, gotAnalytics.Data) + 
require.Len(t, gotAnalytics.OwnerReferences, 1) + assert.Equal(t, wantOwnerUID, gotAnalytics.OwnerReferences[0].UID) + }) + + t.Run("re-adopts retained configmap", func(t *testing.T) { + postgresDB := &enterprisev4.PostgresDatabase{ + TypeMeta: metav1.TypeMeta{ + APIVersion: enterprisev4.GroupVersion.String(), + Kind: "PostgresDatabase", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "primary", + Namespace: "dbs", + UID: types.UID("postgresdb-uid"), + }, + Spec: enterprisev4.PostgresDatabaseSpec{ + Databases: []enterprisev4.DatabaseDefinition{ + {Name: "payments"}, + }, + }, + } + cmName := "primary-payments-config" + wantManagedBy := "splunk-operator" + wantOwnerUID := postgresDB.UID + wantKeep := "true" + wantData := buildDatabaseConfigMapBody("payments", endpoints) + retained := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: cmName, + Namespace: postgresDB.Namespace, + Labels: map[string]string{labelManagedBy: wantManagedBy}, + Annotations: map[string]string{ + annotationRetainedFrom: postgresDB.Name, + "keep": wantKeep, + }, + OwnerReferences: []metav1.OwnerReference{ + {UID: types.UID("old-owner"), Name: "old-owner"}, + }, + }, + Data: map[string]string{ + "dbname": "stale", + }, + } + c := testClient(t, scheme, retained) + + err := reconcileRoleConfigMaps(context.Background(), c, scheme, postgresDB, endpoints) + + require.NoError(t, err) + + got := &corev1.ConfigMap{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: cmName, Namespace: postgresDB.Namespace}, got)) + assert.Equal(t, wantManagedBy, got.Labels[labelManagedBy]) + assert.Equal(t, wantKeep, got.Annotations["keep"]) + _, hasRetainedAnnotation := got.Annotations[annotationRetainedFrom] + assert.False(t, hasRetainedAnnotation) + assert.Equal(t, wantData, got.Data) + assert.Contains(t, got.OwnerReferences, metav1.OwnerReference{ + APIVersion: enterprisev4.GroupVersion.String(), + Kind: "PostgresDatabase", + Name: postgresDB.Name, + UID: wantOwnerUID, + Controller: 
boolPtr(true), + BlockOwnerDeletion: boolPtr(true), + }) + }) +} + +func TestBuildDeletionPlan(t *testing.T) { + databases := []enterprisev4.DatabaseDefinition{ + {Name: "payments", DeletionPolicy: deletionPolicyRetain}, + {Name: "analytics"}, + {Name: "audit", DeletionPolicy: deletionPolicyRetain}, + } + wantRetainedNames := []string{"payments", "audit"} + wantDeletedNames := []string{"analytics"} + + got := buildDeletionPlan(databases) + + assert.ElementsMatch(t, wantRetainedNames, databaseNames(got.retained)) + assert.ElementsMatch(t, wantDeletedNames, databaseNames(got.deleted)) +} + +func TestBuildManagedRoles(t *testing.T) { + databases := []enterprisev4.DatabaseDefinition{ + {Name: "payments"}, + {Name: "analytics"}, + } + want := []enterprisev4.ManagedRole{ + { + Name: "payments_admin", + Exists: true, + PasswordSecretRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "primary-payments-admin"}, + Key: secretKeyPassword, + }, + }, + { + Name: "payments_rw", + Exists: true, + PasswordSecretRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "primary-payments-rw"}, + Key: secretKeyPassword, + }, + }, + { + Name: "analytics_admin", + Exists: true, + PasswordSecretRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "primary-analytics-admin"}, + Key: secretKeyPassword, + }, + }, + { + Name: "analytics_rw", + Exists: true, + PasswordSecretRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "primary-analytics-rw"}, + Key: secretKeyPassword, + }, + }, + } + + got := buildManagedRoles("primary", databases) + + assert.Equal(t, want, got) +} + +func TestBuildManagedRolesPatch(t *testing.T) { + cluster := &enterprisev4.PostgresCluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: enterprisev4.GroupVersion.String(), + Kind: "PostgresCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "primary", + Namespace: "dbs", + }, 
+ } + roles := buildManagedRoles("primary", []enterprisev4.DatabaseDefinition{{Name: "payments"}}) + + got := buildManagedRolesPatch(cluster, roles) + + assert.Equal(t, cluster.APIVersion, got.Object["apiVersion"]) + assert.Equal(t, cluster.Kind, got.Object["kind"]) + assert.Equal(t, map[string]any{"name": cluster.Name, "namespace": cluster.Namespace}, got.Object["metadata"]) + assert.Equal(t, map[string]any{"managedRoles": roles}, got.Object["spec"]) +} + +func TestPatchManagedRolesOnDeletion(t *testing.T) { + scheme := testScheme(t) + postgresDB := &enterprisev4.PostgresDatabase{ + ObjectMeta: metav1.ObjectMeta{ + Name: "primary", + Namespace: "dbs", + }, + } + cluster := &enterprisev4.PostgresCluster{ + TypeMeta: metav1.TypeMeta{ + APIVersion: enterprisev4.GroupVersion.String(), + Kind: "PostgresCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "primary", + Namespace: "dbs", + }, + } + retained := []enterprisev4.DatabaseDefinition{{Name: "payments"}} + want := buildManagedRoles(postgresDB.Name, retained) + c := testClient(t, scheme, cluster) + + err := patchManagedRolesOnDeletion(context.Background(), c, postgresDB, cluster, retained) + + require.NoError(t, err) + + got := &enterprisev4.PostgresCluster{} + require.NoError(t, c.Get(context.Background(), types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}, got)) + assert.Equal(t, want, got.Spec.ManagedRoles) +} + +func TestStripOwnerReference(t *testing.T) { + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + OwnerReferences: []metav1.OwnerReference{ + {UID: types.UID("remove-me"), Name: "db"}, + {UID: types.UID("keep-me"), Name: "cluster"}, + }, + }, + } + + stripOwnerReference(obj, types.UID("remove-me")) + + require.Len(t, obj.OwnerReferences, 1) + assert.Equal(t, types.UID("keep-me"), obj.OwnerReferences[0].UID) +} + +func TestBuildPasswordSecret(t *testing.T) { + postgresDB := &enterprisev4.PostgresDatabase{ + ObjectMeta: metav1.ObjectMeta{ + Name: "primary", + Namespace: 
"dbs", + }, + } + wantName := "primary-payments-admin" + wantNamespace := "dbs" + wantManagedBy := "splunk-operator" + wantReload := "true" + wantUsername := "payments_admin" + wantPassword := "topsecret" + + got := buildPasswordSecret(postgresDB, wantName, wantUsername, wantPassword) + + assert.Equal(t, wantName, got.Name) + assert.Equal(t, wantNamespace, got.Namespace) + assert.Equal(t, wantManagedBy, got.Labels[labelManagedBy]) + assert.Equal(t, wantReload, got.Labels[labelCNPGReload]) + assert.Equal(t, wantUsername, string(got.Data["username"])) + assert.Equal(t, wantPassword, string(got.Data[secretKeyPassword])) +} + +func TestBuildCNPGDatabaseSpec(t *testing.T) { + tests := []struct { + name string + db enterprisev4.DatabaseDefinition + want cnpgv1.DatabaseSpec + }{ + { + name: "uses delete reclaim policy by default", + db: enterprisev4.DatabaseDefinition{Name: "payments"}, + want: cnpgv1.DatabaseSpec{ + Name: "payments", + Owner: "payments_admin", + ClusterRef: corev1.LocalObjectReference{Name: "cnpg-primary"}, + ReclaimPolicy: cnpgv1.DatabaseReclaimDelete, + }, + }, + { + name: "uses retain reclaim policy when deletion policy is retain", + db: enterprisev4.DatabaseDefinition{Name: "analytics", DeletionPolicy: deletionPolicyRetain}, + want: cnpgv1.DatabaseSpec{ + Name: "analytics", + Owner: "analytics_admin", + ClusterRef: corev1.LocalObjectReference{Name: "cnpg-primary"}, + ReclaimPolicy: cnpgv1.DatabaseReclaimRetain, + }, + }, + } + + for _, tst := range tests { + t.Run(tst.name, func(t *testing.T) { + got := buildCNPGDatabaseSpec("cnpg-primary", tst.db) + assert.Equal(t, tst.want, got) + }) + } +} + +func TestBuildDatabaseConfigMapBody(t *testing.T) { + tests := []struct { + name string + endpoints clusterEndpoints + want map[string]string + }{ + { + name: "without pooler endpoints", + endpoints: clusterEndpoints{ + RWHost: "rw.default.svc.cluster.local", + ROHost: "ro.default.svc.cluster.local", + }, + want: map[string]string{ + "dbname": "payments", + 
"port": postgresPort, + "rw-host": "rw.default.svc.cluster.local", + "ro-host": "ro.default.svc.cluster.local", + "admin-user": "payments_admin", + "rw-user": "payments_rw", + }, + }, + { + name: "includes pooler endpoints when available", + endpoints: clusterEndpoints{ + RWHost: "rw.default.svc.cluster.local", + ROHost: "ro.default.svc.cluster.local", + PoolerRWHost: "pooler-rw.default.svc.cluster.local", + PoolerROHost: "pooler-ro.default.svc.cluster.local", + }, + want: map[string]string{ + "dbname": "payments", + "port": postgresPort, + "rw-host": "rw.default.svc.cluster.local", + "ro-host": "ro.default.svc.cluster.local", + "admin-user": "payments_admin", + "rw-user": "payments_rw", + "pooler-rw-host": "pooler-rw.default.svc.cluster.local", + "pooler-ro-host": "pooler-ro.default.svc.cluster.local", + }, + }, + } + + for _, tst := range tests { + t.Run(tst.name, func(t *testing.T) { + got := buildDatabaseConfigMapBody("payments", tst.endpoints) + assert.Equal(t, tst.want, got) + }) + } +} + +func TestResolveClusterEndpoints(t *testing.T) { + tests := []struct { + name string + cluster *enterprisev4.PostgresCluster + cnpg *cnpgv1.Cluster + namespace string + want clusterEndpoints + }{ + { + name: "without connection pooler", + cluster: &enterprisev4.PostgresCluster{}, + cnpg: &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "cnpg-primary"}, + Status: cnpgv1.ClusterStatus{ + WriteService: "primary-rw", + ReadService: "primary-ro", + }, + }, + namespace: "dbs", + want: clusterEndpoints{ + RWHost: "primary-rw.dbs.svc.cluster.local", + ROHost: "primary-ro.dbs.svc.cluster.local", + }, + }, + { + name: "with connection pooler", + cluster: &enterprisev4.PostgresCluster{ + Status: enterprisev4.PostgresClusterStatus{ + ConnectionPoolerStatus: &enterprisev4.ConnectionPoolerStatus{Enabled: true}, + }, + }, + cnpg: &cnpgv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "cnpg-primary"}, + Status: cnpgv1.ClusterStatus{ + WriteService: "primary-rw", + ReadService: 
"primary-ro", + }, + }, + namespace: "dbs", + want: clusterEndpoints{ + RWHost: "primary-rw.dbs.svc.cluster.local", + ROHost: "primary-ro.dbs.svc.cluster.local", + PoolerRWHost: "cnpg-primary-pooler-rw.dbs.svc.cluster.local", + PoolerROHost: "cnpg-primary-pooler-ro.dbs.svc.cluster.local", + }, + }, + } + + for _, tst := range tests { + + t.Run(tst.name, func(t *testing.T) { + got := resolveClusterEndpoints(tst.cluster, tst.cnpg, tst.namespace) + assert.Equal(t, tst.want, got) + }) + } +} + +func TestPopulateDatabaseStatus(t *testing.T) { + postgresDB := &enterprisev4.PostgresDatabase{ + ObjectMeta: metav1.ObjectMeta{Name: "primary"}, + Spec: enterprisev4.PostgresDatabaseSpec{ + Databases: []enterprisev4.DatabaseDefinition{ + {Name: "payments"}, + {Name: "analytics"}, + }, + }, + } + want := []enterprisev4.DatabaseInfo{ + { + Name: "payments", + Ready: true, + AdminUserSecretRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "primary-payments-admin"}, + Key: secretKeyPassword, + }, + RWUserSecretRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "primary-payments-rw"}, + Key: secretKeyPassword, + }, + ConfigMapRef: &corev1.LocalObjectReference{Name: "primary-payments-config"}, + }, + { + Name: "analytics", + Ready: true, + AdminUserSecretRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "primary-analytics-admin"}, + Key: secretKeyPassword, + }, + RWUserSecretRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "primary-analytics-rw"}, + Key: secretKeyPassword, + }, + ConfigMapRef: &corev1.LocalObjectReference{Name: "primary-analytics-config"}, + }, + } + + got := populateDatabaseStatus(postgresDB) + + assert.Equal(t, want, got) +} + +func TestHasNewDatabases(t *testing.T) { + tests := []struct { + name string + postgresDB *enterprisev4.PostgresDatabase + want bool + }{ + { + name: "returns true when spec contains 
a new database", + postgresDB: &enterprisev4.PostgresDatabase{ + Spec: enterprisev4.PostgresDatabaseSpec{ + Databases: []enterprisev4.DatabaseDefinition{ + {Name: "payments"}, + {Name: "analytics"}, + }, + }, + Status: enterprisev4.PostgresDatabaseStatus{ + Databases: []enterprisev4.DatabaseInfo{ + {Name: "payments"}, + }, + }, + }, + want: true, + }, + { + name: "returns false when all spec databases already exist in status", + postgresDB: &enterprisev4.PostgresDatabase{ + Spec: enterprisev4.PostgresDatabaseSpec{ + Databases: []enterprisev4.DatabaseDefinition{ + {Name: "payments"}, + }, + }, + Status: enterprisev4.PostgresDatabaseStatus{ + Databases: []enterprisev4.DatabaseInfo{ + {Name: "payments"}, + {Name: "legacy-extra"}, + }, + }, + }, + want: false, + }, + } + + for _, tst := range tests { + + t.Run(tst.name, func(t *testing.T) { + got := hasNewDatabases(tst.postgresDB) + assert.Equal(t, tst.want, got) + }) + } +} + +func TestNamingHelpers(t *testing.T) { + tests := []struct { + name string + got string + want string + }{ + {name: "field manager", got: fieldManagerName("primary"), want: "postgresdatabase-primary"}, + {name: "admin role", got: adminRoleName("payments"), want: "payments_admin"}, + {name: "rw role", got: rwRoleName("payments"), want: "payments_rw"}, + {name: "cnpg database", got: cnpgDatabaseName("primary", "payments"), want: "primary-payments"}, + {name: "role secret", got: roleSecretName("primary", "payments", "admin"), want: "primary-payments-admin"}, + {name: "config map", got: configMapName("primary", "payments"), want: "primary-payments-config"}, + } + + for _, tst := range tests { + + t.Run(tst.name, func(t *testing.T) { + assert.Equal(t, tst.want, tst.got) + }) + } +} diff --git a/pkg/postgresql/database/core/ports.go b/pkg/postgresql/database/core/ports.go new file mode 100644 index 000000000..0ee71bfe4 --- /dev/null +++ b/pkg/postgresql/database/core/ports.go @@ -0,0 +1,10 @@ +package core + +import "context" + +// DBRepo is the port 
for all direct database operations that require a +// superuser connection, bypassing any connection pooler. +// Adapters implementing this port live in adapter/. +type DBRepo interface { + ExecGrants(ctx context.Context, dbName string) error +} diff --git a/pkg/postgresql/database/core/types.go b/pkg/postgresql/database/core/types.go new file mode 100644 index 000000000..0d1fa116a --- /dev/null +++ b/pkg/postgresql/database/core/types.go @@ -0,0 +1,94 @@ +package core + +import ( + "time" + + enterprisev4 "github.com/splunk/splunk-operator/api/v4" +) + +type reconcileDBPhases string +type conditionTypes string +type conditionReasons string +type clusterReadyStatus string + +const ( + retryDelay = time.Second * 15 + clusterNotFoundRetryDelay = time.Second * 30 + + postgresPort string = "5432" + + readOnlyEndpoint string = "ro" + readWriteEndpoint string = "rw" + + deletionPolicyRetain string = "Retain" + + postgresDatabaseFinalizerName string = "postgresdatabases.enterprise.splunk.com/finalizer" + annotationRetainedFrom string = "enterprise.splunk.com/retained-from" + + fieldManagerPrefix string = "postgresdatabase-" + + secretRoleAdmin string = "admin" + secretRoleRW string = "rw" + secretKeyPassword string = "password" + + labelManagedBy string = "app.kubernetes.io/managed-by" + labelCNPGReload string = "cnpg.io/reload" + + // Password generation — no symbols for PostgreSQL connection string compatibility. 
+ passwordLength = 32 + passwordDigits = 8 + passwordSymbols = 0 + + // DB reconcile phases + readyDBPhase reconcileDBPhases = "Ready" + pendingDBPhase reconcileDBPhases = "Pending" + provisioningDBPhase reconcileDBPhases = "Provisioning" + failedDBPhase reconcileDBPhases = "Failed" + + // condition types + clusterReady conditionTypes = "ClusterReady" + rolesReady conditionTypes = "RolesReady" + databasesReady conditionTypes = "DatabasesReady" + secretsReady conditionTypes = "SecretsReady" + configMapsReady conditionTypes = "ConfigMapsReady" + privilegesReady conditionTypes = "PrivilegesReady" + + // condition reasons + reasonClusterNotFound conditionReasons = "ClusterNotFound" + reasonClusterProvisioning conditionReasons = "ClusterProvisioning" + reasonClusterInfoFetchFailed conditionReasons = "ClusterInfoFetchNotPossible" + reasonClusterAvailable conditionReasons = "ClusterAvailable" + reasonDatabasesAvailable conditionReasons = "DatabasesAvailable" + reasonSecretsCreated conditionReasons = "SecretsCreated" + reasonSecretsCreationFailed conditionReasons = "SecretsCreationFailed" + reasonWaitingForCNPG conditionReasons = "WaitingForCNPG" + reasonUsersCreationFailed conditionReasons = "UsersCreationFailed" + reasonUsersAvailable conditionReasons = "UsersAvailable" + reasonRoleConflict conditionReasons = "RoleConflict" + reasonConfigMapsCreationFailed conditionReasons = "ConfigMapsCreationFailed" + reasonConfigMapsCreated conditionReasons = "ConfigMapsCreated" + reasonPrivilegesGranted conditionReasons = "PrivilegesGranted" + reasonPrivilegesGrantFailed conditionReasons = "PrivilegesGrantFailed" + + // ClusterReady sentinel values returned by ensureClusterReady. + // Exported so the controller adapter can switch on them if needed. 
+ ClusterNotFound clusterReadyStatus = "NotFound" + ClusterNotReady clusterReadyStatus = "NotReady" + ClusterNoProvisionerRef clusterReadyStatus = "NoProvisionerRef" + ClusterReady clusterReadyStatus = "Ready" +) + +// clusterEndpoints holds fully-resolved connection hostnames for a cluster. +// PoolerRWHost and PoolerROHost are empty when connection pooling is disabled. +type clusterEndpoints struct { + RWHost string + ROHost string + PoolerRWHost string + PoolerROHost string +} + +// deletionPlan separates databases by their DeletionPolicy for the cleanup workflow. +type deletionPlan struct { + retained []enterprisev4.DatabaseDefinition + deleted []enterprisev4.DatabaseDefinition +} diff --git a/pkg/splunk/common/names.go b/pkg/splunk/common/names.go index cc70de668..7d1c5d4ca 100644 --- a/pkg/splunk/common/names.go +++ b/pkg/splunk/common/names.go @@ -108,6 +108,9 @@ const ( // MockClientInduceErrorDelete represents an error for delete Api MockClientInduceErrorDelete = "mockClientDeleteError" + // MockClientInduceErrorApply represents an error for apply Api (controller-runtime v0.22+ / k8s v0.34+) + MockClientInduceErrorApply = "mockClientApplyError" + // Rerr represents a random error strting Rerr = "randomError" ) diff --git a/pkg/splunk/test/controller.go b/pkg/splunk/test/controller.go index 6d43fa149..0274b63ad 100644 --- a/pkg/splunk/test/controller.go +++ b/pkg/splunk/test/controller.go @@ -504,6 +504,16 @@ func (c MockClient) Status() client.StatusWriter { return c.StatusWriter } +// Apply applies the given apply configuration to the mock client's state. +// Required by client.Client in controller-runtime v0.22+ (k8s v0.34+). 
+func (c MockClient) Apply(ctx context.Context, obj runtime.ApplyConfiguration, opts ...client.ApplyOption) error { + if value, ok := c.InduceErrorKind[splcommon.MockClientInduceErrorApply]; ok && value != nil { + return value + } + c.Calls["Apply"] = append(c.Calls["Apply"], MockFuncCall{CTX: ctx}) + return nil +} + // ResetCalls resets the function call tracker func (c *MockClient) ResetCalls() { c.Calls = make(map[string][]MockFuncCall) diff --git a/pkg/splunk/util/util.go b/pkg/splunk/util/util.go index df1252f25..5d6b4a214 100644 --- a/pkg/splunk/util/util.go +++ b/pkg/splunk/util/util.go @@ -211,7 +211,7 @@ func PodExecCommand(ctx context.Context, c splcommon.ControllerClient, podName s return "", "", err } } - restClient, err := podExecRESTClientForGVK(gvk, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) + restClient, err := podExecRESTClientForGVK(gvk, false, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) if err != nil { return "", "", err } diff --git a/pkg/splunk/util/util_test.go b/pkg/splunk/util/util_test.go index 19c4b27df..b42d9442d 100644 --- a/pkg/splunk/util/util_test.go +++ b/pkg/splunk/util/util_test.go @@ -48,7 +48,7 @@ var fakePodExecGetConfig = func() (*rest.Config, error) { } // Faking RESTClientForGVK -var fakePodExecRESTClientForGVK = func(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory, client *http.Client) (rest.Interface, error) { +var fakePodExecRESTClientForGVK = func(gvk schema.GroupVersionKind, forceDisableProtoBuf bool, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory, client *http.Client) (rest.Interface, error) { return &fakeRestInterface{}, errors.New("fakeerror") } diff --git a/test/connect-to-postgres-cluster.sh b/test/connect-to-postgres-cluster.sh new file mode 100755 index 000000000..5f45e92d2 --- /dev/null +++ b/test/connect-to-postgres-cluster.sh @@ -0,0 +1,121 @@ 
+#!/bin/bash
+# filepath: test/connect-to-postgres-cluster.sh
+
+set -e
+
+# Color output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No Color
+
+# Default values
+NAMESPACE="${NAMESPACE:-default}"
+POSTGRES_CLUSTER_NAME="${1:-}"
+
+if [ -z "$POSTGRES_CLUSTER_NAME" ]; then
+    echo -e "${RED}Error: PostgresCluster name is required${NC}"
+    echo "Usage: $0 <postgres-cluster-name> [namespace]"
+    echo "Example: $0 my-postgres-cluster default"
+    exit 1
+fi
+
+if [ -n "$2" ]; then
+    NAMESPACE="$2"
+fi
+
+echo -e "${YELLOW}Connecting to PostgresCluster: $POSTGRES_CLUSTER_NAME in namespace: $NAMESPACE${NC}"
+
+# Get ConfigMap name from PostgresCluster status
+CONFIGMAP_NAME=$(kubectl get postgrescluster "$POSTGRES_CLUSTER_NAME" -n "$NAMESPACE" \
+    -o jsonpath='{.status.resources.configMapRef.name}' 2>/dev/null)
+
+if [ -z "$CONFIGMAP_NAME" ]; then
+    echo -e "${RED}Error: ConfigMap reference not found in PostgresCluster status${NC}"
+    echo "Make sure the PostgresCluster is ready and the ConfigMap has been created"
+    exit 1
+fi
+
+# Get Secret name from PostgresCluster status
+SECRET_NAME=$(kubectl get postgrescluster "$POSTGRES_CLUSTER_NAME" -n "$NAMESPACE" \
+    -o jsonpath='{.status.resources.secretRef.name}' 2>/dev/null)
+
+if [ -z "$SECRET_NAME" ]; then
+    echo -e "${RED}Error: Secret reference not found in PostgresCluster status${NC}"
+    echo "Make sure the PostgresCluster is ready and the Secret has been created"
+    exit 1
+fi
+
+echo -e "${GREEN}Found ConfigMap: $CONFIGMAP_NAME${NC}"
+echo -e "${GREEN}Found Secret: $SECRET_NAME${NC}"
+
+# Extract connection details from ConfigMap (using correct uppercase keys)
+echo -e "\n${YELLOW}Extracting connection details...${NC}"
+DB_PORT=$(kubectl get configmap "$CONFIGMAP_NAME" -n "$NAMESPACE" -o jsonpath='{.data.DEFAULT_CLUSTER_PORT}')
+DB_USER=$(kubectl get configmap "$CONFIGMAP_NAME" -n "$NAMESPACE" -o jsonpath='{.data.SUPER_USER_NAME}')
+RW_SERVICE_FQDN=$(kubectl get configmap "$CONFIGMAP_NAME" -n "$NAMESPACE" -o 
jsonpath='{.data.CLUSTER_RW_ENDPOINT}') +RO_SERVICE_FQDN=$(kubectl get configmap "$CONFIGMAP_NAME" -n "$NAMESPACE" -o jsonpath='{.data.CLUSTER_RO_ENDPOINT}') +R_SERVICE_FQDN=$(kubectl get configmap "$CONFIGMAP_NAME" -n "$NAMESPACE" -o jsonpath='{.data.CLUSTER_R_ENDPOINT}') + +# Extract just the service name (first part before the dot) +RW_SERVICE=$(echo "$RW_SERVICE_FQDN" | cut -d'.' -f1) +RO_SERVICE=$(echo "$RO_SERVICE_FQDN" | cut -d'.' -f1) +R_SERVICE=$(echo "$R_SERVICE_FQDN" | cut -d'.' -f1) + +# Extract password from Secret +DB_PASSWORD=$(kubectl get secret "$SECRET_NAME" -n "$NAMESPACE" -o jsonpath='{.data.password}' | base64 -d) + +# Get database name from CNPG cluster (assuming it matches the PostgresCluster name or is 'app') +DB_NAME=$(kubectl get cluster "$POSTGRES_CLUSTER_NAME" -n "$NAMESPACE" -o jsonpath='{.spec.bootstrap.initdb.database}' 2>/dev/null || echo "postgres") + +echo -e "${GREEN}Connection Details:${NC}" +echo " RW Service: $RW_SERVICE_FQDN" +echo " RO Service: $RO_SERVICE_FQDN" +echo " R Service: $R_SERVICE_FQDN" +echo " Port: $DB_PORT" +echo " Database: $DB_NAME" +echo " User: $DB_USER" + +# Check if psql is installed +if ! command -v psql &> /dev/null; then + echo -e "\n${YELLOW}psql client not found. 
Using kubectl run with postgres image...${NC}" + + echo -e "${YELLOW}Creating temporary pod for connection test...${NC}" + + kubectl run postgres-client-test \ + --rm -i --tty \ + --image=postgres:16 \ + --restart=Never \ + --namespace="$NAMESPACE" \ + --env="PGPASSWORD=$DB_PASSWORD" \ + -- psql -h "$RW_SERVICE_FQDN" -p "$DB_PORT" -U "$DB_USER" -d "$DB_NAME" +else + # Use port-forward for local connection + echo -e "\n${YELLOW}Setting up port-forward to PostgreSQL service...${NC}" + + # Kill any existing port-forward on 5432 + pkill -f "kubectl.*port-forward.*$RW_SERVICE" 2>/dev/null || true + + # Start port-forward in background (use service name only, not FQDN) + kubectl port-forward -n "$NAMESPACE" "service/$RW_SERVICE" 5432:$DB_PORT > /dev/null 2>&1 & + PORT_FORWARD_PID=$! + + # Cleanup function + cleanup() { + echo -e "\n${YELLOW}Cleaning up port-forward...${NC}" + kill $PORT_FORWARD_PID 2>/dev/null || true + } + trap cleanup EXIT + + # Wait for port-forward to be ready + echo -e "${YELLOW}Waiting for port-forward to be ready...${NC}" + sleep 3 + + echo -e "${GREEN}Connecting to PostgreSQL...${NC}" + echo -e "${YELLOW}Password: $DB_PASSWORD${NC}\n" + + # Use connection string format which is more reliable + # Disable GSSAPI and use password authentication only + PGPASSWORD="$DB_PASSWORD" psql "postgresql://$DB_USER@localhost:5432/$DB_NAME?gssencmode=disable" \ + || PGPASSWORD="$DB_PASSWORD" psql -h localhost -p 5432 -U "$DB_USER" -d "$DB_NAME" --no-psqlrc +fi \ No newline at end of file diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index e639a9513..cb3624c99 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -217,7 +217,7 @@ func (d *Deployment) PodExecCommand(ctx context.Context, podName string, cmd []s return "", "", err } //FIXME - restClient, err := apiutil.RESTClientForGVK(gvk, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) + restClient, err := apiutil.RESTClientForGVK(gvk, 
false, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) if err != nil { return "", "", err } @@ -264,7 +264,7 @@ func (d *Deployment) OperatorPodExecCommand(ctx context.Context, podName string, return "", "", err } //FIXME - restClient, err := apiutil.RESTClientForGVK(gvk, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) + restClient, err := apiutil.RESTClientForGVK(gvk, false, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) if err != nil { return "", "", err } diff --git a/test/testenv/ingest_utils.go b/test/testenv/ingest_utils.go index 2c0403b1e..d4606ef31 100644 --- a/test/testenv/ingest_utils.go +++ b/test/testenv/ingest_utils.go @@ -187,7 +187,7 @@ func CopyFileToPod(ctx context.Context, podName string, srcPath string, destPath if err != nil { return "", "", err } - restClient, err := apiutil.RESTClientForGVK(gvk, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) + restClient, err := apiutil.RESTClientForGVK(gvk, false, false, restConfig, serializer.NewCodecFactory(scheme.Scheme), http.DefaultClient) if err != nil { return "", "", err }