From 96b5353e58c845114098bade2887bd2f16b62aef Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 12:12:38 +0900 Subject: [PATCH 01/44] Copy slime chart as http-service base Starting point for the new http-service chart. Subsequent commits will add opinionated defaults for HTTP services. Co-Authored-By: Claude Opus 4.6 --- http-service/.helmignore | 21 ++ http-service/Chart.yaml | 18 + http-service/Makefile | 78 +++++ http-service/README.md | 116 ++++++ http-service/examples/cronjob-advanced.yaml | 161 +++++++++ .../examples/cronjob-extra-volumes.yaml | 26 ++ http-service/examples/cronjob.yaml | 14 + http-service/examples/deployment-hpa.yaml | 95 +++++ http-service/examples/deployment-ingress.yaml | 94 +++++ http-service/examples/deployment.yaml | 148 ++++++++ http-service/templates/_helpers.tpl | 59 ++++ http-service/templates/configmap.yaml | 30 ++ http-service/templates/cronjob.yaml | 292 ++++++++++++++++ http-service/templates/deployment.yaml | 203 +++++++++++ http-service/templates/hpa.yaml | 27 ++ http-service/templates/ingress.yaml | 50 +++ http-service/templates/pdb.yaml | 30 ++ http-service/templates/rbac.yaml | 72 ++++ http-service/templates/secret.yaml | 36 ++ http-service/templates/service.yaml | 34 ++ http-service/templates/tests/test.yaml | 45 +++ http-service/values.yaml | 329 ++++++++++++++++++ 22 files changed, 1978 insertions(+) create mode 100644 http-service/.helmignore create mode 100644 http-service/Chart.yaml create mode 100644 http-service/Makefile create mode 100644 http-service/README.md create mode 100644 http-service/examples/cronjob-advanced.yaml create mode 100644 http-service/examples/cronjob-extra-volumes.yaml create mode 100644 http-service/examples/cronjob.yaml create mode 100644 http-service/examples/deployment-hpa.yaml create mode 100644 http-service/examples/deployment-ingress.yaml create mode 100644 http-service/examples/deployment.yaml create mode 100644 http-service/templates/_helpers.tpl create mode 100644 
http-service/templates/configmap.yaml create mode 100644 http-service/templates/cronjob.yaml create mode 100644 http-service/templates/deployment.yaml create mode 100644 http-service/templates/hpa.yaml create mode 100644 http-service/templates/ingress.yaml create mode 100644 http-service/templates/pdb.yaml create mode 100644 http-service/templates/rbac.yaml create mode 100644 http-service/templates/secret.yaml create mode 100644 http-service/templates/service.yaml create mode 100644 http-service/templates/tests/test.yaml create mode 100644 http-service/values.yaml diff --git a/http-service/.helmignore b/http-service/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/http-service/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/http-service/Chart.yaml b/http-service/Chart.yaml new file mode 100644 index 00000000..87d009d1 --- /dev/null +++ b/http-service/Chart.yaml @@ -0,0 +1,18 @@ +apiVersion: v2 +name: slime +description: ₍Ꙭ̂₎ < Not my bad slime + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. 
+# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 1.3.1 diff --git a/http-service/Makefile b/http-service/Makefile new file mode 100644 index 00000000..b169986f --- /dev/null +++ b/http-service/Makefile @@ -0,0 +1,78 @@ +KUBERNETES_VERSION = $${KUBERNETES_VERSION:-"1.33.0"} +RELEASE = $$(basename $$PWD) + +.PHONY: install +install: + helm upgrade -i -f examples/deployment.yaml --wait $(RELEASE) . + +.PHONY: lint +lint: lint-deployment lint-deployment-ingress lint-deployment-hpa lint-cronjob lint-cronjob-advanced + +.PHONY: lint-deployment +lint-deployment: + @echo "=> Linting examples/deployment.yaml" + helm lint --strict -f examples/deployment.yaml + @echo "=> Validating examples/deployment.yaml" + helm template -f examples/deployment.yaml . | kubeval --strict --ignore-missing-schemas --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/ --kubernetes-version $(KUBERNETES_VERSION) --exit-on-error + +.PHONY: lint-deployment-ingress +lint-deployment-ingress: + @echo "=> Linting examples/deployment-ingress.yaml" + helm lint --strict -f examples/deployment-ingress.yaml + @echo "=> Validating examples/deployment-ingress.yaml" + helm template -f examples/deployment-ingress.yaml . | kubeval --strict --ignore-missing-schemas --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/ --kubernetes-version $(KUBERNETES_VERSION) --exit-on-error + +.PHONY: lint-deployment-hpa +lint-deployment-hpa: + @echo "=> Linting examples/deployment-hpa.yaml" + helm lint --strict -f examples/deployment-hpa.yaml + @echo "=> Validating examples/deployment-hpa.yaml" + helm template -f examples/deployment-hpa.yaml . 
| kubeval --strict --ignore-missing-schemas --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/ --kubernetes-version $(KUBERNETES_VERSION) --exit-on-error + +.PHONY: lint-cronjob +lint-cronjob: + @echo "=> Linting examples/cronjob.yaml" + helm lint --strict -f examples/cronjob.yaml + @echo "=> Validating examples/cronjob.yaml" + helm template -f examples/cronjob.yaml . | kubeval --strict --ignore-missing-schemas --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/ --kubernetes-version $(KUBERNETES_VERSION) --exit-on-error + +.PHONY: lint-cronjob-advanced +lint-cronjob-advanced: + @echo "=> Linting examples/cronjob-advanced.yaml" + helm lint --strict -f examples/cronjob-advanced.yaml + @echo "=> Validating examples/cronjob-advanced.yaml" + helm template -f examples/cronjob-advanced.yaml . | kubeval --strict --ignore-missing-schemas --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/ --kubernetes-version $(KUBERNETES_VERSION) --exit-on-error + +.PHONY: test +test: test-deployment test-cronjob test-cronjob-advanced + +.PHONY: test-deployment +test-deployment: + @if helm ls | grep -v "NAME" | cut -f1 | grep -e "^$(RELEASE)$$" > /dev/null; then helm uninstall $(RELEASE); fi + @echo "=> Testing examples/deployment.yaml" + helm upgrade -i --wait -f examples/deployment.yaml $(RELEASE)-deployment . + sleep 30 + helm test $(RELEASE)-deployment --logs + helm uninstall $(RELEASE)-deployment + +.PHONY: test-cronjob +test-cronjob: + @if helm ls | grep -v "NAME" | cut -f1 | grep -e "^$(RELEASE)$$" > /dev/null; then helm uninstall $(RELEASE); fi + @echo "=> Testing examples/cronjob.yaml" + helm upgrade -i --wait -f examples/cronjob.yaml $(RELEASE)-cronjob . 
+ sleep 30 + helm test $(RELEASE)-cronjob --logs + helm uninstall $(RELEASE)-cronjob + +.PHONY: test-cronjob-advanced +test-cronjob-advanced: + @if helm ls | grep -v "NAME" | cut -f1 | grep -e "^$(RELEASE)$$" > /dev/null; then helm uninstall $(RELEASE); fi + @echo "=> Testing examples/cronjob-advanced.yaml" + helm upgrade -i --wait -f examples/cronjob-advanced.yaml $(RELEASE)-cronjob-advanced . + sleep 30 + helm test $(RELEASE)-cronjob-advanced --logs + helm uninstall $(RELEASE)-cronjob-advanced + +.PHONY: uninstall +uninstall: + @if helm ls | grep -v "NAME" | cut -f1 | grep -e "^$(RELEASE)$$" > /dev/null; then helm uninstall $(RELEASE); fi diff --git a/http-service/README.md b/http-service/README.md new file mode 100644 index 00000000..417bec8a --- /dev/null +++ b/http-service/README.md @@ -0,0 +1,116 @@ +# slime + +₍Ꙭ̂₎ < Not my bad slime + +₍Ꙭ̂₎ < I will transform into anything + +## TL;DR; + +``` +$ helm install chatwork/slime +``` + +## Prerequisites + +* Kubernetes 1.18+ + +## Installing the Chart + + +To install the chart with the release name `my-release`: + +``` +$ helm install --name my-release chatwork/slime +``` + +The command deploys the slime chart on the Kubernetes cluster in the default configuration. The [configuration](https://github.com/chatwork/charts/tree/master/slime#configuration) section lists the parameters that can be configured during installation. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +``` +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Difference from raw chart + +[raw chart](https://github.com/chatwork/charts/tree/master/raw) is very useful, but it is too flexible and can be difficult to write if you are not used to helm. +Therefore, this chart has some format for writing.
If it is a simple `deployment` + `service` + `ingress`, this chart is surely easier to write than `raw chart`, +but it does not allow you to define any resources(CRDS, cert-manager,...) freely. + +Use each charts as needed or use them together. + +## Configuration + +The following table lists the configurable parameters of the slime chart and their default values. + +| Parameter | Description | Default | +|--------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|---------------| +| `nameOverride` | Override name of app | `null` | +| `fullnameOverride` | Override the full qualified app name | `null` | +| `deployment.enabled` | Enable deployment | `false` | +| `strategy` | rolling update strategy for deployment | `{}` | +| `annotations` | annotations for deployment | `{}` | +| `labels` | labels for deployment | `{}` | +| `replicas` | replicas for deployment | `1` | +| `revisionHistoryLimit` | revisionHistoryLimit | `""` | +| `podAnnotations` | pod annotations | `{}` | +| `podLabels` | pod labels | `{}` | +| `podSecurityContext` | pod securityContext | `{}` | +| `affinity` | affinity | `{}` | +| `nodeSelector` | nodeSelector | `{}` | +| `imagePullSecrets` | imagePullSecrets | `[]` | +| `readinessGates` | readinessGates | `[]` | +| `priorityClassName` | priorityClassName | `""` | +| `progressDeadlineSeconds` | progressDeadlineSeconds | `""` | +| `volumes` | pod volumes(initContainers, containers) | `[]` | +| `containers` | application containers | `[]` | +| `initContainers.enabled` | if true, you can use initContainers | `false` | +| `initContainers.containers` | initContainers config | `[]` | +| `configmaps` | transform ConfigMap manifest. You can set `binaryData`, `data` | `{}` | +| `secrets` | transform Secret's manifest. 
You can set `data`, `stringData` and `type` | `{}` | +| `autoscaling.enabled` | if true, you can use hpa | `false` | +| `autoscaling.behavior` | autscaling behavior https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#configurable-scaling-behavior | `{}` | +| `autoscaling.metrics` | autoscaling metrics | `[]` | +| `autoscaling.maxReplicas` | autoscaling maxReplicas | `2` | +| `autoscaling.minReplicas` | autoscaling minReplicas | `1` | +| `service.enabled` | if true, you can use service | `false` | +| `service.type` | service type(ClusterIP, NodePort, LoadBalancer) | `"ClusterIP"` | +| `service.ports` | service ports | `{}` | +| `clusterRole.enabled` | if true, you can use clusterRole | `false` | +| `clusterRole.rules` | clusterRole rules | `[]` | +| `role.enabled` | if true, you can use role | `false` | +| `role.rules` | role rules | `[]` | +| `serviceAccount.create` | if true, you can create serviceAccount | `false` | +| `serviceAccount.name` | if you create serviceAccount, you can set name | `null` | +| `serviceAccount.labels` | service account labels | `{}` | +| `serviceAccount.annotations` | serviceAccount annotations | `{}` | +| `podDisruptionBudget.enabled` | if ture, you can use podDisruptionBudget | `false` | +| `podDisruptionBudget.annotations` | podDisruptionBudget annotations | `{}` | +| `podDisruptionBudget.labels` | podDisruptionBudget labels | `{}` | +| `podDisruptionBudget.maxUnavailable` | podDisruptionBudget maxUnavailable | `null` | +| `podDisruptionBudget.minAvailable` | podDisruptionBudget minAvailable | `null` | +| `ingress.enabled` | if true, you can use ingress | `false` | +| `ingress.ingresses` | ingresses config | `{}` | +| `cronJob.enabled` | if true, you can use CronJob | `false` | +| `schedule` | CronJob's schedule | `""` | +| `cronJobRestartPolicy` | CronJob's restartPolicy | `OnFailure` | +| `concurrencyPolicy` | CronJob's concurrencyPolicy | `Allow` | +| `failedJobsHistoryLimit` | CronJob's failedJobsHistoryLimit 
| `1` | +| `startingDeadlineSeconds` | CronJob's startingDeadlineSeconds | `null` | +| `successfulJobsHistoryLimit` | CronJob's successfulJobsHistoryLimit | `3` | +| `suspend` | CronJob's suspend | `null` | +| `timeZone` | CronJob's timeZone https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#cronjob-v1-batch | `null` | +| `cronJobContainers` | CronJob's containers https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#jobtemplatespec-v1-batch | `[]` | +| `cronJobVolumes` | CronJob's pod volumes(initContainers, containers) | `[]` | +| `extraCronJobVolumes` | CronJob's extra pod volumes(initContainers, containers). Use when you want to add volume other than the common settings for each application. | `[]` | +| `test.enabled` | if true, you can use helm test | `false` | +| `test.containers` | helm test container config | `[]` | + + +# generate README + +This README is generated with https://github.com/rapidsai/frigate diff --git a/http-service/examples/cronjob-advanced.yaml b/http-service/examples/cronjob-advanced.yaml new file mode 100644 index 00000000..0399e3c0 --- /dev/null +++ b/http-service/examples/cronjob-advanced.yaml @@ -0,0 +1,161 @@ +fullnameOverride: "slime-advanced" + +cronJob: + enabled: true + +cronJobAnnotations: + release/cronjob-annotation: advanced + +cronJobLabels: + release/cronjob-label: advanced + +schedule: "* * * * *" +concurrencyPolicy: Replace +failedJobsHistoryLimit: 3 +startingDeadlineSeconds: 60 +successfulJobsHistoryLimit: 5 +timeZone: Asia/Tokyo + +cronJobTemplateAnnotations: + slime/cronjob-template-annotation: advanced + +cronJobTemplateLabels: + slime/cronjob-template-label: advanced + +activeDeadlineSeconds: 30 +backoffLimit: 10 + + +#completionMode: NonIndexed +#completions: 1 +#manualSelector: true +#parallelism: 1 +#jobTemplateSuspend: "false" +#ttlSecondsAfterFinished: 300 + +cronJobPodActiveDeadlineSeconds: 15 + +#cronJobAffinity: +# podAffinity: +# 
requiredDuringSchedulingIgnoredDuringExecution: +# - labelSelector: +# matchExpressions: +# - key: security +# operator: In +# values: +# - S1 + +#automountServiceAccountToken: true + +#cronJobDnsConfig: +# nameservers: +# - 127.0.0.1 +# searches: +# - ns1.svc.cluster.local + +#cronJobUseHostNetwork: true +#cronJobEnableServiceLinks: true +#cronJobHostAliases: +#- ip: "127.0.0.1" +# hostnames: +# - "foo.local" +# - "bar.local" +#cronJobHostIPC: true +#cronJobHostPID: true +#cronJobHostUsers: true +#cronJobHostname: test + +#cronJobImagePullSecrets: +# - name: slime-secret + +cronJobPodAnnotations: + slime/cronjob-pod-annotation: advanced + +cronJobPodLabels: + slime/cronjob-pod-label: advanced + +cronJobContainers: + - name: job + image: + repository: hello-world + tag: latest + imagePullPolicy: IfNotPresent + env: + - name: APP_NAME + value: 'slime-advanced' + envFrom: + - secretRef: + name: 'slime-advanced-env' + volumeMounts: + - name: configs + mountPath: /configs + +cronJobInitContainers: + enabled: true + containers: + - name: init + image: + repository: busybox + tag: latest + +cronJobVolumes: + - name: configs + configMap: + name: "slime-advanced-config" + defaultMode: 0644 + + +#cronJobNodeName: slime + +#cronJobPreemptionPolicy: Never + +#cronJobNodeSelector: +# disktype: ssd + +#cronJobPriority: 1 + +#cronJobPriorityClassName: system-node-critical + +#cronJobRestartPolicy: Never + +#cronJobSchedulerName: slime + +#cronJobSecurityContext: +# runAsUser: 1000 +# fsGroup: 2000 + +#cronJobSetHostnameAsFQDN: "false" + +#cronJobShareProcessNamespace: "false" + +#cronJobSubdomain: hostname.subdomain.pod-namespace.svc.cluster-domain + +cronJobTerminationGracePeriodSeconds: 120 + +#cronJobTolerations: +#- key: "key1" +# operator: "Equal" +# value: "value1" +# effect: "NoSchedule" + +#cronJobTopologySpreadConstraints: +# enabled: true +# constraints: +# - maxSkew: 1 +# topologyKey: topology.kubernetes.io/zone +# whenUnsatisfiable: DoNotSchedule +# labelSelector: +# 
app: myapp + +secrets: + env: + type: Opaque + data: + SECRET_VALUE: 'slime-advanced' + +configmaps: + config: + data: + greeting: "Hello slime." + properties: | + mode=default diff --git a/http-service/examples/cronjob-extra-volumes.yaml b/http-service/examples/cronjob-extra-volumes.yaml new file mode 100644 index 00000000..391afd27 --- /dev/null +++ b/http-service/examples/cronjob-extra-volumes.yaml @@ -0,0 +1,26 @@ +cronJob: + enabled: true + +schedule: "*/5 * * * *" +restartPolicy: OnFailure + +cronJobContainers: + - name: job + image: + repository: hello-world + tag: latest + imagePullPolicy: IfNotPresent + +cronJobRestartPolicy: Never + +cronJobVolumes: + - name: configs + configMap: + name: "slime-config" + defaultMode: 0644 + +extraCronJobVolumes: + - name: extra-configs + configMap: + name: "slime-extra-config" + defaultMode: 0644 diff --git a/http-service/examples/cronjob.yaml b/http-service/examples/cronjob.yaml new file mode 100644 index 00000000..f7a0e8cf --- /dev/null +++ b/http-service/examples/cronjob.yaml @@ -0,0 +1,14 @@ +cronJob: + enabled: true + +schedule: "*/5 * * * *" +restartPolicy: OnFailure + +cronJobContainers: + - name: job + image: + repository: hello-world + tag: latest + imagePullPolicy: IfNotPresent + +cronJobRestartPolicy: Never diff --git a/http-service/examples/deployment-hpa.yaml b/http-service/examples/deployment-hpa.yaml new file mode 100644 index 00000000..f20873ff --- /dev/null +++ b/http-service/examples/deployment-hpa.yaml @@ -0,0 +1,95 @@ +fullnameOverride: nginx-example-hpa +nameOverride: nginx-example-hpa + +deployment: + enabled: true + +strategy: + type: RollingUpdate + + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + +annotations: + a.b: c + +labels: + a/b: c + +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 10 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - "{{ .Release.Name}}" + topologyKey: 
kubernetes.io/hostname + +containers: + - name: nginx + image: + repository: nginx + tag: latest + + lifecycle: + preStop: + exec: + command: ["sh", "-c", "sleep 10"] + + ports: + - name: http + containerPort: 80 + protocol: TCP + + resources: + requests: + cpu: 0.1 + memory: 256Mi + limits: + cpu: 0.1 + memory: 256Mi + + - name: ubuntu-sleep-infinity + image: + repository: ubuntu + tag: latest + + command: ["sleep"] + args: ["infinity"] + +service: + enabled: true + type: ClusterIP + clusterIP: None + ports: + http: + targetPort: 80 + port: 80 + protocol: TCP + +autoscaling: + enabled: true + maxReplicas: 2 + minReplicas: 2 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 + behavior: + scaleDown: + stabilizationWindowSeconds: 300 + +podDisruptionBudget: + enabled: true + maxUnavailable: 1 + +test: + enabled: false diff --git a/http-service/examples/deployment-ingress.yaml b/http-service/examples/deployment-ingress.yaml new file mode 100644 index 00000000..444c3eee --- /dev/null +++ b/http-service/examples/deployment-ingress.yaml @@ -0,0 +1,94 @@ +fullnameOverride: nginx-example-ingress +nameOverride: nginx-example-ingress + +deployment: + enabled: true + +strategy: + type: RollingUpdate + + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + +replicas: 2 + +containers: + - name: nginx + image: + repository: nginx + tag: latest + + ports: + - name: http + containerPort: 80 + protocol: TCP + + resources: + requests: + cpu: 0.1 + memory: 128Mi + limits: + cpu: 0.1 + memory: 128Mi + +service: + enabled: true + type: ClusterIP + ports: + http: + targetPort: 80 + port: 80 + protocol: TCP + +ingress: + enabled: true + ingresses: + example1: + ingressClassName: alb + annotations: + alb.ingress.kubernetes.io/backend-protocol: HTTP + alb.ingress.kubernetes.io/healthcheck-path: / + alb.ingress.kubernetes.io/healthcheck-port: '80' + alb.ingress.kubernetes.io/healthcheck-protocol: HTTP + 
alb.ingress.kubernetes.io/inbound-cidrs: '0.0.0.0/0' + alb.ingress.kubernetes.io/listen-ports: |- + [ + { + "HTTP": 80 + } + ] + alb.ingress.kubernetes.io/scheme: internal + alb.ingress.kubernetes.io/target-type: ip + hosts: + - host: example.com + paths: + - path: "/path1/*" + pathType: ImplementationSpecific + portNumber: 80 + + example2: + ingressClassName: alb + annotations: + alb.ingress.kubernetes.io/backend-protocol: HTTP + alb.ingress.kubernetes.io/healthcheck-path: / + alb.ingress.kubernetes.io/healthcheck-port: '80' + alb.ingress.kubernetes.io/healthcheck-protocol: HTTP + alb.ingress.kubernetes.io/inbound-cidrs: '0.0.0.0/0' + alb.ingress.kubernetes.io/listen-ports: |- + [ + { + "HTTP": 80 + } + ] + alb.ingress.kubernetes.io/scheme: internal + alb.ingress.kubernetes.io/target-type: ip + hosts: + - host: example.com + paths: + - path: "/path2/*" + pathType: ImplementationSpecific + portNumber: 80 + +test: + enabled: false diff --git a/http-service/examples/deployment.yaml b/http-service/examples/deployment.yaml new file mode 100644 index 00000000..403e1f6d --- /dev/null +++ b/http-service/examples/deployment.yaml @@ -0,0 +1,148 @@ +#fullnameOverride: nginx-example +#nameOverride: nginx-example + +deployment: + enabled: true + +strategy: + type: RollingUpdate + + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + +replicas: 2 + +annotations: + a.b: c + +labels: + a/b: c + +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 10 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app.kubernetes.io/instance + operator: In + values: + - nginx-example + topologyKey: kubernetes.io/hostname + +configmap: + name1: log-config1 + +secret: + name1: aws-credentials1 + +volumes: + - name: config-vol + configMap: + name: '{{ include "slime.fullname" . 
}}-{{ .Values.configmap.name1 }}' + items: + - key: log_level + path: log_level + +configmaps: + "{{ .Values.configmap.name1 }}": + labels: + hoge: "1" + annotations: + foo: a + data: + log_level: "ERROR" + + log-config2: + labels: + hoge: "2" + data: + log_level: "ERROR" + +secrets: + "{{ .Values.secret.name1 }}": + labels: + hoge: "1" + annotations: + foo: a + + data: + AWS_ACCESS_KEY: "YWZiY2RlYTEyMzQzCg==" + + aws-credentials2: + annotations: + foo: a + data: + AWS_ACCESS_KEY: "YWZiY2RlYTEyMzQzCg==" + +containers: + - name: nginx + image: + repository: nginx + tag: latest + + volumeMounts: + - name: config-vol + mountPath: /etc/config + + lifecycle: + preStop: + exec: + command: ["sh", "-c", "sleep 10"] + + ports: + - name: http + containerPort: 80 + protocol: TCP + + resources: + requests: + cpu: 0.1 + memory: 128Mi + limits: + cpu: 0.1 + memory: 128Mi + + - name: ubuntu-sleep-infinity + image: + repository: ubuntu + tag: latest + + command: ["sh", "-c", "sleep infinity"] + + envFrom: + - secretRef: + name: '{{ include "slime.fullname" . }}-{{ .Values.secret.name1 }}' + + resources: + requests: + cpu: 0.1 + memory: 64Mi + limits: + cpu: 0.1 + memory: 64Mi + +service: + enabled: true + type: ClusterIP + ports: + http: + targetPort: 80 + port: 80 + protocol: TCP + +autoscaling: + enabled: false + +test: + enabled: true + containers: + - name: test + + image: + repository: curlimages/curl + tag: latest + + command: ["sh","-c","sleep 30; echo $APP_FULLNAME.$NAMESPACE; curl $APP_FULLNAME.$NAMESPACE"] diff --git a/http-service/templates/_helpers.tpl b/http-service/templates/_helpers.tpl new file mode 100644 index 00000000..f45c347f --- /dev/null +++ b/http-service/templates/_helpers.tpl @@ -0,0 +1,59 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "slime.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "slime.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "slime.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "slime.labels" -}} +helm.sh/chart: {{ include "slime.chart" . }} +{{ include "slime.selectorLabels" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "slime.selectorLabels" -}} +app.kubernetes.io/name: {{ include "slime.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "slime.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "slime.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/http-service/templates/configmap.yaml b/http-service/templates/configmap.yaml new file mode 100644 index 00000000..273e42dc --- /dev/null +++ b/http-service/templates/configmap.yaml @@ -0,0 +1,30 @@ +{{- $root := . 
}} +{{- range $name, $value := .Values.configmaps }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "slime.fullname" $root }}-{{ tpl $name $root }} + namespace: {{ $root.Release.Namespace }} + labels: + {{- include "slime.labels" $root | nindent 4 }} + {{- with $value.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + checksum/configmap: '{{ toYaml $value | sha256sum }}' + {{- with $value.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if and (semverCompare ">=1.19-0" $root.Capabilities.KubeVersion.GitVersion) $value.immutable }} +immutable: {{ $value.immutable }} +{{- end }} +{{- with $value.binaryData }} +binaryData: + {{- . | toYaml | nindent 2 }} +{{- end }} +{{- with $value.data }} +data: + {{- . | toYaml | nindent 2 }} +{{- end }} +{{- end }} diff --git a/http-service/templates/cronjob.yaml b/http-service/templates/cronjob.yaml new file mode 100644 index 00000000..0db283fa --- /dev/null +++ b/http-service/templates/cronjob.yaml @@ -0,0 +1,292 @@ +{{- if .Values.cronJob.enabled -}} +{{- $root := . }} +# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#cronjob-v1-batch +apiVersion: batch/v1 +kind: CronJob +metadata: + name: {{ include "slime.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- with .Values.cronJobAnnotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "slime.labels" . | nindent 4 }} + {{- with .Values.cronJobLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} +# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#cronjobspec-v1-batch +spec: + {{- with .Values.concurrencyPolicy }} + concurrencyPolicy: {{ . }} + {{- end }} + {{- with .Values.failedJobsHistoryLimit }} + failedJobsHistoryLimit: {{ . }} + {{- end }} + schedule: "{{ .Values.schedule }}" + {{- with .Values.startingDeadlineSeconds }} + startingDeadlineSeconds: {{ . 
}} + {{- end }} + {{- with .Values.successfulJobsHistoryLimit }} + successfulJobsHistoryLimit: {{ . }} + {{- end }} + {{- with .Values.suspend }} + suspend: {{ . }} + {{- end }} + # Available in 1.27 or later + # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#cron-job-limitations + {{- with .Values.timeZone }} + timeZone: {{ . }} + {{- end }} + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#jobtemplatespec-v1-batch + jobTemplate: + metadata: + {{- with .Values.cronJobTemplateAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "slime.labels" . | nindent 8 }} + {{- with .Values.cronJobTemplateLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#jobspec-v1-batch + {{- with .Values.activeDeadlineSeconds }} + activeDeadlineSeconds: {{ . }} + {{- end }} + {{- with .Values.backoffLimit }} + backoffLimit: {{ . }} + {{- end }} + {{- with .Values.completionMode }} + completionMode: {{ . }} + {{- end }} + {{- with .Values.completions }} + completions: {{ . }} + {{- end }} + {{- with .Values.manualSelector }} + manualSelector: {{ . }} + {{- end }} + {{- with .Values.parallelism }} + parallelism: {{ . }} + {{- end }} + {{- with .Values.jobTemplateSuspend }} + suspend: {{ . }} + {{- end }} + {{- with .Values.ttlSecondsAfterFinished }} + ttlSecondsAfterFinished: {{ . }} + {{- end }} + template: + metadata: + {{- with .Values.cronJobPodAnnotations }} + annotations: + {{- toYaml . | nindent 12 }} + {{- end }} + labels: + {{- include "slime.labels" . | nindent 12 }} + {{- with .Values.cronJobPodLabels }} + {{- toYaml . | nindent 12 }} + {{- end }} + spec: + {{- with .Values.cronJobPodActiveDeadlineSeconds }} + activeDeadlineSeconds: {{ . }} + {{- end }} + {{- with .Values.cronJobAffinity }} + affinity: + {{- tpl (. 
| toYaml) $root | nindent 14 }} + {{- end }} + {{- with .Values.automountServiceAccountToken }} + automountServiceAccountToken: {{ . }} + {{- end }} + containers: + {{- range $container := .Values.cronJobContainers }} + - name: {{ include "slime.fullname" $root }}-{{ $container.name }} + {{- with $container.securityContext }} + securityContext: + {{- toYaml . | nindent 16 }} + {{- end }} + image: {{ $container.image.repository }}:{{ $container.image.tag }} + {{- with $container.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + {{- with $container.ports }} + ports: + {{- tpl (. | toYaml) $root | nindent 16 }} + {{- end }} + {{- with $container.command }} + command: + {{- toYaml . | nindent 16 }} + {{- end }} + {{- with $container.args }} + args: + {{- toYaml . | nindent 16 }} + {{- end }} + {{- with $container.workingDir }} + workingDir: {{ . }} + {{- end }} + {{- with $container.env }} + env: + {{- tpl (. | toYaml) $root | nindent 16 }} + {{- end }} + {{- with $container.envFrom }} + envFrom: + {{- tpl (. | toYaml) $root | nindent 16 }} + {{- end }} + {{- with $container.lifecycle }} + lifecycle: + {{- toYaml . | nindent 16 }} + {{- end }} + {{- with $container.startupProbe }} + startupProbe: + {{- tpl (. | toYaml) $root | nindent 16 }} + {{- end }} + {{- with $container.readinessProbe }} + readinessProbe: + {{- tpl (. | toYaml) $root | nindent 16 }} + {{- end }} + {{- with $container.livenessProbe }} + livenessProbe: + {{- tpl (. | toYaml) $root | nindent 16 }} + {{- end }} + {{- with $container.resources }} + resources: + {{- toYaml . | nindent 16 }} + {{- end }} + {{- with $container.volumeMounts }} + volumeMounts: + {{- tpl (. | toYaml) $root | nindent 16 }} + {{- end }} + {{- end }} + {{- with .Values.cronJobDnsConfig }} + dnsConfig: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- if .Values.cronJobUseHostNetwork }} + hostNetwork: {{ .Values.cronJobUseHostNetwork }} + dnsPolicy: ClusterFirstWithHostNet + {{- end }} + {{- if and (not .Values.cronJobUseHostNetwork) .Values.cronJobDnsPolicy }} + dnsPolicy: {{ .Values.cronJobDnsPolicy }} + {{- end }} + {{- with .Values.cronJobEnableServiceLinks }} + enableServiceLinks: {{ . }} + {{- end }} + {{- with .Values.cronJobHostAliases }} + hostAliases: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.cronJobHostIPC }} + hostIPC: {{ . }} + {{- end }} + {{- with .Values.cronJobHostPID }} + hostPID: {{ . }} + {{- end }} + {{- with .Values.cronJobHostUsers }} + hostUsers: {{ . }} + {{- end }} + {{- with .Values.cronJobHostname }} + hostname: {{ . }} + {{- end }} + {{- with .Values.cronJobImagePullSecrets }} + imagePullSecrets: + {{- tpl (. | toYaml) $root | nindent 12 }} + {{- end }} + {{- if .Values.cronJobInitContainers.enabled }} + initContainers: + {{- range $container := .Values.cronJobInitContainers.containers }} + - name: {{ include "slime.fullname" $root }}-{{ $container.name }} + image: {{ $container.image.repository }}:{{ $container.image.tag }} + {{- with $container.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + {{- with $container.command }} + command: + {{- toYaml . | nindent 14 }} + {{- end }} + {{- with $container.args }} + args: + {{- toYaml . | nindent 14 }} + {{- end }} + {{- with $container.workingDir }} + workingDir: {{ . }} + {{- end }} + {{- with $container.env }} + env: + {{- tpl (. | toYaml) $root | nindent 16 }} + {{- end }} + {{- with $container.envFrom }} + envFrom: + {{- tpl (. | toYaml) $root | nindent 14 }} + {{- end }} + {{- with $container.volumeMounts }} + volumeMounts: + {{- tpl (. | toYaml) $root | nindent 14 }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.cronJobNodeName }} + nodeName: {{ . }} + {{- end }} + {{- with .Values.cronJobNodeSelector }} + nodeSelector: + {{- tpl (. 
| toYaml) $root | nindent 12 }} + {{- end }} + {{- with .Values.cronJobPreemptionPolicy }} + preemptionPolicy: {{ . }} + {{- end }} + {{- with .Values.cronJobPriority }} + priority: {{ . }} + {{- end }} + {{- with .Values.cronJobPriorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- if .Values.cronJobRestartPolicy }} + restartPolicy: {{ .Values.cronJobRestartPolicy }} + {{- else }} + restartPolicy: "OnFailure" + {{- end }} + {{- with .Values.cronJobSchedulerName }} + schedulerName: {{ . }} + {{- end }} + serviceAccountName: {{ include "slime.serviceAccountName" . }} + {{- with .Values.cronJobSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.cronJobSetHostnameAsFQDN }} + setHostnameAsFQDN: {{ . }} + {{- end }} + {{- with .Values.cronJobShareProcessNamespace }} + shareProcessNamespace: {{ . }} + {{- end }} + {{- with .Values.cronJobSubdomain }} + subdomain: {{ . }} + {{- end }} + {{- with .Values.cronJobTerminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ . }} + {{- end }} + {{- with .Values.cronJobTolerations }} + tolerations: + {{- tpl (. | toYaml) $root | nindent 12 }} + {{- end }} + {{- if .Values.cronJobTopologySpreadConstraints.enabled }} + topologySpreadConstraints: + {{- range $constraint := .Values.cronJobTopologySpreadConstraints.constraints }} + - maxSkew: {{ $constraint.maxSkew }} + topologyKey: {{ $constraint.topologyKey }} + whenUnsatisfiable: {{ $constraint.whenUnsatisfiable }} + labelSelector: + matchLabels: + {{- include "slime.selectorLabels" $root | nindent 18 }} + {{- end }} + {{- end }} + {{- if or (.Values.cronJobVolumes) (.Values.extraCronJobVolumes) }} + volumes: + {{- with .Values.cronJobVolumes }} + {{- tpl (. | toYaml) $root | nindent 12 }} + {{- end }} + {{- with .Values.extraCronJobVolumes }} + {{- tpl (. 
| toYaml) $root | nindent 12 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml new file mode 100644 index 00000000..e5330786 --- /dev/null +++ b/http-service/templates/deployment.yaml @@ -0,0 +1,203 @@ +{{- if .Values.deployment.enabled -}} +{{- $root := . }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "slime.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "slime.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and (not .Values.autoscaling.enabled) .Values.replicas }} + replicas: {{ .Values.replicas }} + {{- end }} + selector: + matchLabels: + {{- include "slime.selectorLabels" . | nindent 6 }} + {{- with .Values.deployment.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.revisionHistoryLimit }} + revisionHistoryLimit: {{ . }} + {{- end }} + template: + metadata: + annotations: + checksum/configmap: '{{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}' + checksum/secret: '{{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}' + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "slime.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.useHostNetwork }} + hostNetwork: {{ .Values.useHostNetwork }} + dnsPolicy: ClusterFirstWithHostNet + {{- end }} + {{- if and (not .Values.useHostNetwork) .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy }} + {{- end }} + {{- with .Values.shareProcessNamespace }} + shareProcessNamespace: {{ . }} + {{- end }} + {{- with .Values.dnsConfig }} + dnsConfig: + {{- toYaml . 
| indent 8 }} + {{- end }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- tpl (. | toYaml) $root | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- tpl (. | toYaml) $root | nindent 8 }} + {{- end }} + {{- with .Values.readinessGates }} + readinessGates: + {{- tpl (. | toYaml) $root | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "slime.serviceAccountName" . }} + {{- with .Values.podSecurityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- tpl (. | toYaml) $root | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- tpl (. | toYaml) $root | nindent 8 }} + {{- end }} + {{- with .Values.restartPolicy }} + restartPolicy: {{ . }} + {{- end }} + {{- with .Values.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ . }} + {{- end }} + {{- with .Values.progressDeadlineSeconds }} + progressDeadlineSeconds: {{ . }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: + {{- range .Values.topologySpreadConstraints }} + - maxSkew: {{ .maxSkew }} + topologyKey: {{ .topologyKey }} + whenUnsatisfiable: {{ .whenUnsatisfiable }} + labelSelector: + matchLabels: + {{- include "slime.selectorLabels" $root | nindent 14 }} + {{- end }} + {{- end }} + {{- with .Values.volumes }} + volumes: + {{- tpl (. | toYaml) $root | nindent 8 }} + {{- end }} + {{- if .Values.initContainers.enabled }} + initContainers: + {{- range $container := .Values.initContainers.containers }} + - name: {{ include "slime.fullname" $root }}-{{ $container.name }} + image: {{ $container.image.repository }}:{{ $container.image.tag }} + {{- with $container.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + {{- with $container.command }} + command: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- with $container.args }} + args: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with $container.workingDir }} + workingDir: {{ . }} + {{- end }} + {{- with $container.env }} + env: + {{- tpl (. | toYaml) $root | nindent 12 }} + {{- end }} + {{- with $container.envFrom }} + envFrom: + {{- tpl (. | toYaml) $root | nindent 12 }} + {{- end }} + {{- with $container.volumeMounts }} + volumeMounts: + {{- tpl (. | toYaml) $root | nindent 12 }} + {{- end }} + {{- end }} + {{- end }} + containers: + {{- range $container := .Values.containers }} + - name: {{ include "slime.fullname" $root }}-{{ $container.name }} + {{- with $container.securityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + image: {{ $container.image.repository }}:{{ $container.image.tag }} + {{- with $container.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + {{- with $container.ports }} + ports: + {{- tpl (. | toYaml) $root | nindent 12 }} + {{- end }} + {{- with $container.command }} + command: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with $container.args }} + args: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with $container.workingDir }} + workingDir: {{ . }} + {{- end }} + {{- with $container.env }} + env: + {{- tpl (. | toYaml) $root | nindent 12 }} + {{- end }} + {{- with $container.envFrom }} + envFrom: + {{- tpl (. | toYaml) $root | nindent 12 }} + {{- end }} + {{- with $container.lifecycle }} + lifecycle: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with $container.startupProbe }} + startupProbe: + {{- tpl (. | toYaml) $root | nindent 12 }} + {{- end }} + {{- with $container.readinessProbe }} + readinessProbe: + {{- tpl (. | toYaml) $root | nindent 12 }} + {{- end }} + {{- with $container.livenessProbe }} + livenessProbe: + {{- tpl (. | toYaml) $root | nindent 12 }} + {{- end }} + {{- with $container.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + {{- with $container.volumeMounts }} + volumeMounts: + {{- tpl (. | toYaml) $root | nindent 12 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/http-service/templates/hpa.yaml b/http-service/templates/hpa.yaml new file mode 100644 index 00000000..2a89d6be --- /dev/null +++ b/http-service/templates/hpa.yaml @@ -0,0 +1,27 @@ +{{- $root := . -}} +{{- if .Values.autoscaling.enabled -}} +{{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: autoscaling/v2 +{{- else }} +apiVersion: autoscaling/v2beta2 +{{- end }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "slime.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "slime.labels" . | nindent 4 }} +spec: + {{- with .Values.autoscaling.behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} + metrics: + {{- toYaml .Values.autoscaling.metrics | nindent 4 }} + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "slime.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} +{{- end }} diff --git a/http-service/templates/ingress.yaml b/http-service/templates/ingress.yaml new file mode 100644 index 00000000..6a814b52 --- /dev/null +++ b/http-service/templates/ingress.yaml @@ -0,0 +1,50 @@ +{{- if .Values.ingress.enabled }} +{{- $root := . }} +{{- range $name, $ingress := .Values.ingress.ingresses }} +{{- $fullName := include "slime.fullname" $root -}} +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + {{- with $ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "slime.labels" $root | nindent 4 }} + {{- with $ingress.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: "{{ $fullName }}-{{ $name }}" + namespace: {{ $root.Release.Namespace }} +spec: + {{- if $ingress.ingressClassName }} + ingressClassName: {{ $ingress.ingressClassName }} + {{- end }} + {{- if $ingress.tls }} + tls: + {{- range $ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: "{{ .secretName }}" + {{- end }} + {{- end }} + rules: + {{- range $ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + pathType: {{ .pathType }} + backend: + service: + name: {{ $fullName }} + port: + number: {{ .portNumber }} + {{- end }} + {{- end }} +{{ end }} +{{- end }} diff --git a/http-service/templates/pdb.yaml b/http-service/templates/pdb.yaml new file mode 100644 index 00000000..5deb9526 --- /dev/null +++ b/http-service/templates/pdb.yaml @@ -0,0 +1,30 @@ +{{- if .Values.podDisruptionBudget.enabled }} +{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion }} +apiVersion: policy/v1 +{{- else }} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + {{- with .Values.podDisruptionBudget.annotations }} + annotations: + {{- toYaml . | nindent 4}} + {{- end }} + labels: + {{- include "slime.labels" . | nindent 4 }} + {{- with .Values.podDisruptionBudget.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "slime.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + {{- with .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ . }} + {{- end }} + {{- with .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "slime.selectorLabels" . 
| nindent 6 }} +{{- end }} diff --git a/http-service/templates/rbac.yaml b/http-service/templates/rbac.yaml new file mode 100644 index 00000000..c4f9f7b9 --- /dev/null +++ b/http-service/templates/rbac.yaml @@ -0,0 +1,72 @@ +{{- if .Values.clusterRole.enabled }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "slime.labels" . | nindent 4 }} + name: {{ include "slime.fullname" . }} +rules: + {{- with .Values.clusterRole.rules }} + {{- toYaml . | nindent 2 }} + {{- end }} +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "slime.labels" . | nindent 4 }} + name: {{ include "slime.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "slime.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "slime.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +--- +{{- end }} +{{- if .Values.role.enabled }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "slime.labels" . | nindent 4 }} + name: {{ include "slime.fullname" . }} + namespace: {{ .Release.Namespace }} +rules: + {{- with .Values.role.rules }} + {{- toYaml . | nindent 2 }} + {{- end }} +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "slime.labels" . | nindent 4 }} + name: {{ include "slime.fullname" . }} + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "slime.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "slime.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +--- +{{- end }} +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} + labels: + {{- include "slime.labels" . 
| nindent 4 }} + {{- with .Values.serviceAccount.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "slime.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/http-service/templates/secret.yaml b/http-service/templates/secret.yaml new file mode 100644 index 00000000..43a7e5d3 --- /dev/null +++ b/http-service/templates/secret.yaml @@ -0,0 +1,36 @@ +{{- $root := . }} +{{- range $name, $value := .Values.secrets }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "slime.fullname" $root }}-{{ tpl $name $root }} + namespace: {{ $root.Release.Namespace }} + labels: + {{- include "slime.labels" $root | nindent 4 }} + {{- with $value.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + checksum/secret: '{{ toYaml $value | sha256sum }}' + {{- with $value.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if and (semverCompare ">=1.19-0" $root.Capabilities.KubeVersion.GitVersion) $value.immutable }} +immutable: {{ $value.immutable }} +{{- end }} +{{- with $value.data }} +data: +{{- range $k, $v := $value.data }} + {{ $k }}: {{ . | b64enc | quote }} +{{- end }} +{{- with $value.stringData }} +stringData: + {{- . | toYaml | nindent 2 }} +{{- end }} +{{- with $value.type }} +type: + {{- . | toYaml | nindent 2 }} +{{- end }} +{{- end }} +{{- end }} diff --git a/http-service/templates/service.yaml b/http-service/templates/service.yaml new file mode 100644 index 00000000..eefb704c --- /dev/null +++ b/http-service/templates/service.yaml @@ -0,0 +1,34 @@ +{{- if .Values.service.enabled }} +{{- $root := . }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "slime.fullname" . }} + namespace: {{ .Release.Namespace }} + {{- with .Values.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: + {{- include "slime.labels" . | nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + ports: + {{- range $name, $v := .Values.service.ports }} + - name: {{ $name }} + targetPort: {{ $v.targetPort }} + port: {{ $v.port }} + {{- if and (eq $root.Values.service.type "NodePort") $v.nodePort }} + nodePort: {{ $v.nodePort }} + {{- end }} + protocol: {{ $v.protocol }} + {{- end }} + selector: + {{- include "slime.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/http-service/templates/tests/test.yaml b/http-service/templates/tests/test.yaml new file mode 100644 index 00000000..208374e6 --- /dev/null +++ b/http-service/templates/tests/test.yaml @@ -0,0 +1,45 @@ +{{- if .Values.test.enabled -}} +{{- $root := . }} +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "slime.fullname" . }}-test" + namespace: {{ .Release.Namespace }} + labels: + helm.sh/chart: {{ include "slime.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + annotations: + "helm.sh/hook": test +spec: + containers: + {{- range $container := .Values.test.containers }} + - name: {{ include "slime.fullname" $root }}-{{ $container.name }}-test + image: {{ $container.image.repository }}:{{ $container.image.tag }} + {{- with $container.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + {{- with $container.command }} + command: + {{- toYaml . | nindent 8 }} + {{- end }} + env: + - name: RELEASE_NAME + value: {{ $root.Release.Name }} + - name: NAMESPACE + value: {{ $root.Release.Namespace }} + - name: APP_FULLNAME + value: {{ include "slime.fullname" $root }} + {{- with $container.env }} + {{- toYaml $container.env | nindent 8 }} + {{- end }} + {{- with $container.envFrom }} + envFrom: + {{- tpl (. | toYaml) $root | nindent 8 }} + {{- end }} + {{- with $container.resources }} + resources: + {{- tpl (. 
| toYaml) $root | nindent 8 }} + {{- end }} + {{- end }} + restartPolicy: Never +{{- end }} diff --git a/http-service/values.yaml b/http-service/values.yaml new file mode 100644 index 00000000..efca4628 --- /dev/null +++ b/http-service/values.yaml @@ -0,0 +1,329 @@ +# Default values for example. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +nameOverride: # Override name of app + +fullnameOverride: # Override the full qualified app name + +deployment: + enabled: false # Enable deployment + +strategy: {} # rolling update strategy for deployment + +annotations: {} # annotations for deployment + +labels: {} # labels for deployment +# name: value + +# If you want fix replicaCount, disable autoscaling.enabled and uncomment the following line +#replicas: 1 # replicas for deployment + +revisionHistoryLimit: "" # revisionHistoryLimit + +podAnnotations: {} # pod annotations + +podLabels: {} # pod labels + +podSecurityContext: {} # pod securityContext + +affinity: {} # affinity + +nodeSelector: {} # nodeSelector + +imagePullSecrets: [] # imagePullSecrets + +readinessGates: [] # readinessGates + +priorityClassName: "" # priorityClassName + +progressDeadlineSeconds: "" # progressDeadlineSeconds + +volumes: [] # pod volumes(initContainers, containers) + +containers: [] # application containers +# - name: app1 +# +# image: +# repository: chatwork/app1 +# tag: latest +# pullPolicy: Always +# +# command: [] +# args: [] +# +# securityContext: {} +# +# ports: [] +# workingDir: /workingDir +# +# env: [] +# # - name: DEMO_GREETING +# # value: "Hello from the environment" +# +# envFrom: [] +# # - configMapRef: +# # name: special-config +# +# lifecycle: {} +# +# startupProbe: {} +# #httpGet: {} +# # scheme: HTTP +# # path: [your http path] +# # port: [your http port] +# #tcpSocket: {} +# # port: [your tcp port] +# #exec: {} +# # command: [your command] +# #initialDelaySeconds: 30 +# #periodSeconds: 5 +# #timeoutSeconds: 5 +# 
#successThreshold: 1 +# #failureThreshold: 3 +# +# readinessProbe: {} +# #httpGet: {} +# # scheme: HTTP +# # path: [your http path] +# # port: [your http port] +# #tcpSocket: {} +# # port: [your tcp port] +# #exec: {} +# # command: [your command] +# #initialDelaySeconds: 60 +# #periodSeconds: 5 +# #timeoutSeconds: 5 +# #successThreshold: 1 +# #failureThreshold: 3 +# +# livenessProbe: {} +# # If true, enable the liveness probe. +# #httpGet: {} +# # scheme: HTTP +# # path: [your http path] +# # port: [your http port] +# #tcpSocket: {} +# # port: [your tcp port] +# #exec: {} +# # command: [your command] +# #initialDelaySeconds: 60 +# #periodSeconds: 5 +# #timeoutSeconds: 5 +# #successThreshold: 1 +# #failureThreshold: 3 +# +# resources: {} +# +# volumeMounts: [] +# # - name: volume-name +# # mountPath: /path/to + +initContainers: + enabled: false # if true, you can use initContainers + + containers: [] # initContainers config + # - name: init + # + # image: + # repository: chatwork/init + # tag: latest + # + # command: [] + # args: [] + # + # env: [] + # # - name: DEMO_GREETING + # # value: "Hello from the environment" + # + # envFrom: [] + # # - configMapRef: + # # name: special-config + # volumeMounts: [] + # # - name: volume-name + # # mountPath: /path/to + +configmaps: {} # transform ConfigMap manifest. You can set `binaryData`, `data` +# your-configmap-name: +# labels: +# a: b +# annotations: +# foo: bar +# data: +# slime: '₍Ꙭ̂₎' + +secrets: {} # transform Secret's manifest. 
You can set `data`, `stringData` and `type` +# your-secret-name: +# annotations: +# foo: bar +# data: +# slime: 4oKN6pmszILigo4K + +autoscaling: + enabled: false # if true, you can use hpa + behavior: {} # autscaling behavior https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#configurable-scaling-behavior + metrics: [] # autoscaling metrics + maxReplicas: 2 # autoscaling maxReplicas + minReplicas: 1 # autoscaling minReplicas + +service: + enabled: false # if true, you can use service + type: ClusterIP # service type(ClusterIP, NodePort, LoadBalancer) + ports: {} # service ports + # http: + # # container port + # targetPort: 8080 + # # svc port + # port: 80 + # protocol: TCP + +clusterRole: + enabled: false # if true, you can use clusterRole + rules: [] # clusterRole rules + # - apiGroups: + # - "" + # resources: + # - nodes + # verbs: + # - get + # - list + # - watch + +role: + enabled: false # if true, you can use role + rules: [] # role rules + # - apiGroups: + # - "" + # resources: + # - pods + # verbs: + # - watch + +serviceAccount: + create: false # if true, you can create serviceAccount + name: # if you create serviceAccount, you can set name + labels: {} # service account labels + annotations: {} # serviceAccount annotations + +podDisruptionBudget: + enabled: false # if ture, you can use podDisruptionBudget + annotations: {} # podDisruptionBudget annotations + labels: {} # podDisruptionBudget labels + maxUnavailable: # podDisruptionBudget maxUnavailable + minAvailable: # podDisruptionBudget minAvailable + +ingress: + enabled: false # if true, you can use ingress + ingresses: {} # ingresses config + # your-ingress-name: + # annotations: {} + # labels: {} + # tls: [] + # - hosts: [] + # secretName: "" + # hosts: [] + # - host: + # paths: + # path: "/*" + # pathType: ImplementationSpecific + # portNumber: 80 + +test: + enabled: false # if true, you can use helm test + containers: [] # helm test container config + #- name: app1-test + # + 
# image: + # repository: curl + # tag: latest + # pullPolicy: Always + # # you can use environment vairables $RELEASE_NAME, $NAMESPACE, $APP_FULLNAME + # command: ["curl","$RELEASE_NAME.$NAMESPACE"] + # + # env: [] + # + # envFrom: [] + +# +# If you use CronJob, configure the following sections +# +# CronJob and deployment keys are completely separated so as not to mix them up. +# +cronJob: + enabled: false # Enable CronJob + +cronJobContainers: [] +# - name: job-app +# image: +# repository: hello-world +# tag: latest +# pullPolicy: IfNotPresent +# command: ["/hello"] +# env: [] +# envFrom: [] +# restartPolicy: "OnFailure" + +cronJobInitContainers: + enabled: false + containers: [] + # - name: init + # + # image: + # repository: chatwork/init + # tag: latest + # + # command: [] + # args: [] + # + # env: [] + # # - name: DEMO_GREETING + # # value: "Hello from the environment" + # + # envFrom: [] + # # - configMapRef: + # # name: special-config + # volumeMounts: [] + # # - name: volume-name + # # mountPath: /path/to + +cronJobVolumes: [] # CronJob's pod volumes(initContainers, containers) + +extraCronJobVolumes: [] # CronJob's pod extra volumes. Use when you want to add volume other than the common settings for each application. 
(initContainers, containers) + +cronJobImagePullSecrets: [] # annotations for imagePullSecrets + +cronJobAnnotations: {} # annotations for CronJob + +cronJobLabels: {} # labels for CronJob + +concurrencyPolicy: Allow + +failedJobsHistoryLimit: 1 + +schedule: "" # Schedule to run CronJob + +#startingDeadlineSeconds: + +successfulJobsHistoryLimit: 3 + +suspend: "false" + +# Available in 1.27 or later +# https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#cron-job-limitations +#timeZone: "Etc/UTC" + +cronJobTemplateAnnotations: {} + +cronJobTemplateLabels: {} + +cronJobPodAnnotations: {} + +cronJobPodLabels: {} + +cronJobAffinity: {} # affinity for CronJob + +cronJobDnsConfig: {} + +cronJobTopologySpreadConstraints: + enabled: false + constraints: [] From 76edd20cf635e02858c9686c9c001bd8be96bf95 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 12:47:47 +0900 Subject: [PATCH 02/44] Remove CronJob configuration from http-service values HTTP services don't need CronJob support. This keeps the chart focused on its use case. Co-Authored-By: Claude Opus 4.6 --- http-service/values.yaml | 83 ---------------------------------------- 1 file changed, 83 deletions(-) diff --git a/http-service/values.yaml b/http-service/values.yaml index efca4628..1e8cbc16 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -244,86 +244,3 @@ test: # # envFrom: [] -# -# If you use CronJob, configure the following sections -# -# CronJob and deployment keys are completely separated so as not to mix them up. 
-# -cronJob: - enabled: false # Enable CronJob - -cronJobContainers: [] -# - name: job-app -# image: -# repository: hello-world -# tag: latest -# pullPolicy: IfNotPresent -# command: ["/hello"] -# env: [] -# envFrom: [] -# restartPolicy: "OnFailure" - -cronJobInitContainers: - enabled: false - containers: [] - # - name: init - # - # image: - # repository: chatwork/init - # tag: latest - # - # command: [] - # args: [] - # - # env: [] - # # - name: DEMO_GREETING - # # value: "Hello from the environment" - # - # envFrom: [] - # # - configMapRef: - # # name: special-config - # volumeMounts: [] - # # - name: volume-name - # # mountPath: /path/to - -cronJobVolumes: [] # CronJob's pod volumes(initContainers, containers) - -extraCronJobVolumes: [] # CronJob's pod extra volumes. Use when you want to add volume other than the common settings for each application. (initContainers, containers) - -cronJobImagePullSecrets: [] # annotations for imagePullSecrets - -cronJobAnnotations: {} # annotations for CronJob - -cronJobLabels: {} # labels for CronJob - -concurrencyPolicy: Allow - -failedJobsHistoryLimit: 1 - -schedule: "" # Schedule to run CronJob - -#startingDeadlineSeconds: - -successfulJobsHistoryLimit: 3 - -suspend: "false" - -# Available in 1.27 or later -# https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#cron-job-limitations -#timeZone: "Etc/UTC" - -cronJobTemplateAnnotations: {} - -cronJobTemplateLabels: {} - -cronJobPodAnnotations: {} - -cronJobPodLabels: {} - -cronJobAffinity: {} # affinity for CronJob - -cronJobDnsConfig: {} - -cronJobTopologySpreadConstraints: - enabled: false - constraints: [] From 0a6b1a652961718ace034495ac860efade431096 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 12:49:54 +0900 Subject: [PATCH 03/44] Rename slime to http-service in Chart.yaml and _helpers.tpl Co-Authored-By: Claude Opus 4.6 --- http-service/Chart.yaml | 19 +++---------------- http-service/templates/_helpers.tpl | 20 
++++++++++---------- 2 files changed, 13 insertions(+), 26 deletions(-) diff --git a/http-service/Chart.yaml b/http-service/Chart.yaml index 87d009d1..f666aef8 100644 --- a/http-service/Chart.yaml +++ b/http-service/Chart.yaml @@ -1,18 +1,5 @@ apiVersion: v2 -name: slime -description: ₍Ꙭ̂₎ < Not my bad slime - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. +name: http-service +description: An opinionated Helm chart for language-agnostic HTTP services with sensible defaults type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.3.1 +version: 0.1.0 diff --git a/http-service/templates/_helpers.tpl b/http-service/templates/_helpers.tpl index f45c347f..67aa371a 100644 --- a/http-service/templates/_helpers.tpl +++ b/http-service/templates/_helpers.tpl @@ -1,7 +1,7 @@ {{/* Expand the name of the chart. */}} -{{- define "slime.name" -}} +{{- define "http-service.name" -}} {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} {{- end }} @@ -10,7 +10,7 @@ Create a default fully qualified app name. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). If release name contains chart name it will be used as a full name. 
*/}} -{{- define "slime.fullname" -}} +{{- define "http-service.fullname" -}} {{- if .Values.fullnameOverride }} {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} {{- else }} @@ -26,33 +26,33 @@ If release name contains chart name it will be used as a full name. {{/* Create chart name and version as used by the chart label. */}} -{{- define "slime.chart" -}} +{{- define "http-service.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} {{- end }} {{/* Common labels */}} -{{- define "slime.labels" -}} -helm.sh/chart: {{ include "slime.chart" . }} -{{ include "slime.selectorLabels" . }} +{{- define "http-service.labels" -}} +helm.sh/chart: {{ include "http-service.chart" . }} +{{ include "http-service.selectorLabels" . }} app.kubernetes.io/managed-by: {{ .Release.Service }} {{- end }} {{/* Selector labels */}} -{{- define "slime.selectorLabels" -}} -app.kubernetes.io/name: {{ include "slime.name" . }} +{{- define "http-service.selectorLabels" -}} +app.kubernetes.io/name: {{ include "http-service.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} {{- end }} {{/* Create the name of the service account to use */}} -{{- define "slime.serviceAccountName" -}} +{{- define "http-service.serviceAccountName" -}} {{- if .Values.serviceAccount.create }} -{{- default (include "slime.fullname" .) .Values.serviceAccount.name }} +{{- default (include "http-service.fullname" .) .Values.serviceAccount.name }} {{- else }} {{- default "default" .Values.serviceAccount.name }} {{- end }} From 41b52f4e68bbfc8a7cc16569a5bc217e9cf63964 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 14:31:47 +0900 Subject: [PATCH 04/44] Replace free-form containers array with structured single-container config The http-service chart assumes a single main container per pod. - image, containerPort, env, resources, etc. 
are now top-level values - Health check probes (startup, liveness, readiness) shown as commented samples; startup and liveness are required in the template - extraContainers escape hatch for sidecar use cases - initContainers kept as-is for DB migrations and config prep Co-Authored-By: Claude Opus 4.6 --- http-service/values.yaml | 117 +++++++++++++-------------------------- 1 file changed, 40 insertions(+), 77 deletions(-) diff --git a/http-service/values.yaml b/http-service/values.yaml index 1e8cbc16..d0e91773 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -41,83 +41,46 @@ progressDeadlineSeconds: "" # progressDeadlineSeconds volumes: [] # pod volumes(initContainers, containers) -containers: [] # application containers -# - name: app1 -# -# image: -# repository: chatwork/app1 -# tag: latest -# pullPolicy: Always -# -# command: [] -# args: [] -# -# securityContext: {} -# -# ports: [] -# workingDir: /workingDir -# -# env: [] -# # - name: DEMO_GREETING -# # value: "Hello from the environment" -# -# envFrom: [] -# # - configMapRef: -# # name: special-config -# -# lifecycle: {} -# -# startupProbe: {} -# #httpGet: {} -# # scheme: HTTP -# # path: [your http path] -# # port: [your http port] -# #tcpSocket: {} -# # port: [your tcp port] -# #exec: {} -# # command: [your command] -# #initialDelaySeconds: 30 -# #periodSeconds: 5 -# #timeoutSeconds: 5 -# #successThreshold: 1 -# #failureThreshold: 3 -# -# readinessProbe: {} -# #httpGet: {} -# # scheme: HTTP -# # path: [your http path] -# # port: [your http port] -# #tcpSocket: {} -# # port: [your tcp port] -# #exec: {} -# # command: [your command] -# #initialDelaySeconds: 60 -# #periodSeconds: 5 -# #timeoutSeconds: 5 -# #successThreshold: 1 -# #failureThreshold: 3 -# -# livenessProbe: {} -# # If true, enable the liveness probe. 
-# #httpGet: {} -# # scheme: HTTP -# # path: [your http path] -# # port: [your http port] -# #tcpSocket: {} -# # port: [your tcp port] -# #exec: {} -# # command: [your command] -# #initialDelaySeconds: 60 -# #periodSeconds: 5 -# #timeoutSeconds: 5 -# #successThreshold: 1 -# #failureThreshold: 3 -# -# resources: {} -# -# volumeMounts: [] -# # - name: volume-name -# # mountPath: /path/to +# -- Main container image +image: + repository: "" + tag: "" + pullPolicy: IfNotPresent + +containerPort: 8080 # port exposed by the main container + +command: [] # container command override +args: [] # container args override +env: [] # environment variables +envFrom: [] # environment variable sources +resources: {} # container resources (limits/requests) +securityContext: {} # container security context +volumeMounts: [] # container volume mounts + +# -- Health check probes. +# startupProbe and livenessProbe are required in the template (rendering fails if not set). +# readinessProbe is optional. +#startupProbe: +# httpGet: +# path: /healthz +# port: 8080 +# failureThreshold: 5 +# initialDelaySeconds: 10 +# periodSeconds: 1 +#livenessProbe: +# httpGet: +# path: /healthz +# port: 8080 +# failureThreshold: 2 +# periodSeconds: 10 +#readinessProbe: +# httpGet: +# path: /ready +# port: 8080 +# periodSeconds: 5 +# failureThreshold: 3 + +extraContainers: [] # additional sidecar containers (escape hatch) initContainers: enabled: false # if true, you can use initContainers From 8ea7c8247652d25d6e565c9539485f6147395c5d Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 14:33:51 +0900 Subject: [PATCH 05/44] Remove deployment toggle and set RollingUpdate strategy defaults This chart always creates a Deployment, so the enabled flag is removed. Default strategy set to RollingUpdate with maxSurge 25% and maxUnavailable 1, matching the most common pattern across services. 
Co-Authored-By: Claude Opus 4.6 --- http-service/values.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/http-service/values.yaml b/http-service/values.yaml index d0e91773..bef709f3 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -6,10 +6,11 @@ nameOverride: # Override name of app fullnameOverride: # Override the full qualified app name -deployment: - enabled: false # Enable deployment - -strategy: {} # rolling update strategy for deployment +strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: "25%" + maxUnavailable: 1 annotations: {} # annotations for deployment From 916f96d0bcdb5b7aad3ee99d4aa2b070b418df14 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 14:36:14 +0900 Subject: [PATCH 06/44] Enable PodDisruptionBudget by default with maxUnavailable 1 PDB is enabled out of the box to protect service availability during node drains. maxUnavailable (not minAvailable) is used as the default to remain safe even with single-replica deployments. 
Co-Authored-By: Claude Opus 4.6 --- http-service/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/http-service/values.yaml b/http-service/values.yaml index bef709f3..27673121 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -170,10 +170,10 @@ serviceAccount: annotations: {} # serviceAccount annotations podDisruptionBudget: - enabled: false # if ture, you can use podDisruptionBudget + enabled: true # PDB enabled by default to protect availability annotations: {} # podDisruptionBudget annotations labels: {} # podDisruptionBudget labels - maxUnavailable: # podDisruptionBudget maxUnavailable + maxUnavailable: 1 # allow at most 1 pod unavailable during disruptions minAvailable: # podDisruptionBudget minAvailable ingress: From fd3c7f65352bf74f111b73a463bdb759b54221f3 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 14:41:15 +0900 Subject: [PATCH 07/44] Split ingress into private and public sections Replace the generic ingresses map with explicit private/public ingress sections. Each has its own enabled flag, allowing services to declare which ALB types they need. The template will auto-generate appropriate ALB annotations for each type. 
Co-Authored-By: Claude Opus 4.6 --- http-service/values.yaml | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/http-service/values.yaml b/http-service/values.yaml index 27673121..159df5c4 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -177,20 +177,31 @@ podDisruptionBudget: minAvailable: # podDisruptionBudget minAvailable ingress: - enabled: false # if true, you can use ingress - ingresses: {} # ingresses config - # your-ingress-name: - # annotations: {} - # labels: {} - # tls: [] - # - hosts: [] - # secretName: "" - # hosts: [] - # - host: - # paths: - # path: "/*" - # pathType: ImplementationSpecific - # portNumber: 80 + # -- Private ingress (internal ALB) + private: + enabled: false + annotations: {} + labels: {} + hosts: [] + # - host: api.internal.example.com + # paths: + # - path: "/*" + # pathType: ImplementationSpecific + # portNumber: 80 + tls: [] + + # -- Public ingress (internet-facing ALB) + public: + enabled: false + annotations: {} + labels: {} + hosts: [] + # - host: api.example.com + # paths: + # - path: "/*" + # pathType: ImplementationSpecific + # portNumber: 80 + tls: [] test: enabled: false # if true, you can use helm test From 61db4179a76b5fec47c77e27057d493070291287 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 14:42:43 +0900 Subject: [PATCH 08/44] Add gracefulShutdown configuration as required value Commented-out sample showing the required gracefulShutdown.seconds value. The template will use this to set terminationGracePeriodSeconds and generate a preStop lifecycle hook for ALB deregistration delay. 
Co-Authored-By: Claude Opus 4.6 --- http-service/values.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/http-service/values.yaml b/http-service/values.yaml index 159df5c4..495d3270 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -50,6 +50,12 @@ image: containerPort: 8080 # port exposed by the main container +# -- Graceful shutdown configuration (required). +# The template uses this to set terminationGracePeriodSeconds and +# generate a preStop lifecycle hook (sleep for deregistration delay). +#gracefulShutdown: +# seconds: 30 + command: [] # container command override args: [] # container args override env: [] # environment variables From a9889e8f7fa7e382fb92171df656aa0ef2d1246b Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 14:51:22 +0900 Subject: [PATCH 09/44] Add nodeAffinity key/value configuration Provide nodeAffinity.key and nodeAffinity.value inputs for generating requiredDuringSchedulingIgnoredDuringExecution node affinity rules. Concrete defaults (e.g. ARM node groups) are injected via Helmfile settings, keeping the chart vendor-neutral. Co-Authored-By: Claude Opus 4.6 --- http-service/values.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/http-service/values.yaml b/http-service/values.yaml index 495d3270..fb325a8f 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -30,6 +30,14 @@ podSecurityContext: {} # pod securityContext affinity: {} # affinity +# -- Node affinity for targeting specific node groups. +# Set key and value to generate a +# requiredDuringSchedulingIgnoredDuringExecution node affinity rule. +# Concrete defaults (e.g. ARM nodes) are set in Helmfile settings. 
+#nodeAffinity: +# key: "" +# value: "" + nodeSelector: {} # nodeSelector imagePullSecrets: [] # imagePullSecrets From 3a01256f677c642ed0c906a5874090f774731fd8 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 14:56:33 +0900 Subject: [PATCH 10/44] Add Datadog unified service tags configuration Datadog integration is enabled by default, auto-generating tags.datadoghq.com/{env,service,version} labels on Deployment and Pod. When enabled, env/service/version are required values. Services using alternative observability (e.g. OpenTelemetry) can set enabled to false. Co-Authored-By: Claude Opus 4.6 --- http-service/values.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/http-service/values.yaml b/http-service/values.yaml index fb325a8f..05e34e0f 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -22,6 +22,16 @@ labels: {} # labels for deployment revisionHistoryLimit: "" # revisionHistoryLimit +# -- Datadog unified service tags. +# When enabled, the template auto-generates tags.datadoghq.com/{env,service,version} +# labels on both the Deployment and Pod. Set enabled to false for services +# using alternative observability (e.g. OpenTelemetry). +datadog: + enabled: true + #env: "" # required when enabled + #service: "" # required when enabled + #version: "" # required when enabled + podAnnotations: {} # pod annotations podLabels: {} # pod labels From 2b7c9373bd346d14ae05c94eaea7f99d5a1e9c28 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 15:06:11 +0900 Subject: [PATCH 11/44] Restructure annotations and labels under deployment/pod namespaces Rename flat annotations/labels/podAnnotations/podLabels to namespaced deployment.{annotations,labels} and pod.{annotations,labels,securityContext} for consistency with other resource sections (serviceAccount, PDB, ingress). 
Co-Authored-By: Claude Opus 4.6 --- http-service/values.yaml | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/http-service/values.yaml b/http-service/values.yaml index 05e34e0f..b61c0f9d 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -12,10 +12,14 @@ strategy: maxSurge: "25%" maxUnavailable: 1 -annotations: {} # annotations for deployment +# -- Reloader annotation for auto-restarting on ConfigMap/Secret changes. +# When enabled, adds reloader.stakater.com/auto: "true" to Deployment annotations. +reloader: + enabled: true -labels: {} # labels for deployment -# name: value +deployment: + annotations: {} + labels: {} # If you want fix replicaCount, disable autoscaling.enabled and uncomment the following line #replicas: 1 # replicas for deployment @@ -32,11 +36,10 @@ datadog: #service: "" # required when enabled #version: "" # required when enabled -podAnnotations: {} # pod annotations - -podLabels: {} # pod labels - -podSecurityContext: {} # pod securityContext +pod: + annotations: {} + labels: {} + securityContext: {} affinity: {} # affinity From cd4f122f77efc88554cf398157bd200b39b0f37b Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 15:08:48 +0900 Subject: [PATCH 12/44] Clean up slime references in configmaps/secrets samples Co-Authored-By: Claude Opus 4.6 --- http-service/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/http-service/values.yaml b/http-service/values.yaml index b61c0f9d..022af5a7 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -141,14 +141,14 @@ configmaps: {} # transform ConfigMap manifest. You can set `binaryData`, `data` # annotations: # foo: bar # data: -# slime: '₍Ꙭ̂₎' +# key: value secrets: {} # transform Secret's manifest. 
You can set `data`, `stringData` and `type` # your-secret-name: # annotations: # foo: bar # data: -# slime: 4oKN6pmszILigo4K +# key: base64-encoded-value autoscaling: enabled: false # if true, you can use hpa From f71dac4cc29d7b6bb45d106e9fe50c63f06b3de0 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 15:46:05 +0900 Subject: [PATCH 13/44] Rename slime to http-service and remove deployment.enabled guard The http-service chart always creates a Deployment, so the conditional guard is removed. All slime helper references updated to http-service. Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index e5330786..b426b07c 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -1,16 +1,15 @@ -{{- if .Values.deployment.enabled -}} {{- $root := . }} apiVersion: apps/v1 kind: Deployment metadata: - name: {{ include "slime.fullname" . }} + name: {{ include "http-service.fullname" . }} namespace: {{ .Release.Namespace }} {{- with .Values.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} labels: - {{- include "slime.labels" . | nindent 4 }} + {{- include "http-service.labels" . | nindent 4 }} {{- with .Values.labels }} {{- toYaml . | nindent 4 }} {{- end }} @@ -20,7 +19,7 @@ spec: {{- end }} selector: matchLabels: - {{- include "slime.selectorLabels" . | nindent 6 }} + {{- include "http-service.selectorLabels" . | nindent 6 }} {{- with .Values.deployment.strategy }} strategy: {{- toYaml . | nindent 4 }} @@ -37,7 +36,7 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} labels: - {{- include "slime.selectorLabels" . | nindent 8 }} + {{- include "http-service.selectorLabels" . | nindent 8 }} {{- with .Values.podLabels }} {{- toYaml . | nindent 8 }} {{- end }} @@ -68,7 +67,7 @@ spec: readinessGates: {{- tpl (. 
| toYaml) $root | nindent 8 }} {{- end }} - serviceAccountName: {{ include "slime.serviceAccountName" . }} + serviceAccountName: {{ include "http-service.serviceAccountName" . }} {{- with .Values.podSecurityContext }} securityContext: {{- toYaml . | nindent 8 }} @@ -101,7 +100,7 @@ spec: whenUnsatisfiable: {{ .whenUnsatisfiable }} labelSelector: matchLabels: - {{- include "slime.selectorLabels" $root | nindent 14 }} + {{- include "http-service.selectorLabels" $root | nindent 14 }} {{- end }} {{- end }} {{- with .Values.volumes }} @@ -111,7 +110,7 @@ spec: {{- if .Values.initContainers.enabled }} initContainers: {{- range $container := .Values.initContainers.containers }} - - name: {{ include "slime.fullname" $root }}-{{ $container.name }} + - name: {{ include "http-service.fullname" $root }}-{{ $container.name }} image: {{ $container.image.repository }}:{{ $container.image.tag }} {{- with $container.image.pullPolicy }} imagePullPolicy: {{ . }} @@ -143,7 +142,7 @@ spec: {{- end }} containers: {{- range $container := .Values.containers }} - - name: {{ include "slime.fullname" $root }}-{{ $container.name }} + - name: {{ include "http-service.fullname" $root }}-{{ $container.name }} {{- with $container.securityContext }} securityContext: {{- toYaml . | nindent 12 }} @@ -200,4 +199,3 @@ spec: {{- tpl (. 
| toYaml) $root | nindent 12 }} {{- end }} {{- end }} -{{- end }} From a06138818c83acf39c836e938e61daa695913e02 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 15:46:36 +0900 Subject: [PATCH 14/44] Update values reference paths in deployment template Align template references with the restructured values.yaml namespaces: - annotations/labels -> deployment.annotations/labels - podAnnotations/podLabels -> pod.annotations/labels - podSecurityContext -> pod.securityContext Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index b426b07c..3d203c55 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -4,13 +4,13 @@ kind: Deployment metadata: name: {{ include "http-service.fullname" . }} namespace: {{ .Release.Namespace }} - {{- with .Values.annotations }} + {{- with .Values.deployment.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} labels: {{- include "http-service.labels" . | nindent 4 }} - {{- with .Values.labels }} + {{- with .Values.deployment.labels }} {{- toYaml . | nindent 4 }} {{- end }} spec: @@ -32,12 +32,12 @@ spec: annotations: checksum/configmap: '{{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}' checksum/secret: '{{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}' - {{- with .Values.podAnnotations }} + {{- with .Values.pod.annotations }} {{- toYaml . | nindent 8 }} {{- end }} labels: {{- include "http-service.selectorLabels" . | nindent 8 }} - {{- with .Values.podLabels }} + {{- with .Values.pod.labels }} {{- toYaml . | nindent 8 }} {{- end }} spec: @@ -68,7 +68,7 @@ spec: {{- tpl (. | toYaml) $root | nindent 8 }} {{- end }} serviceAccountName: {{ include "http-service.serviceAccountName" . 
}} - {{- with .Values.podSecurityContext }} + {{- with .Values.pod.securityContext }} securityContext: {{- toYaml . | nindent 8 }} {{- end }} From 11a1a4104a19708cbda2c28e43a8d37ebc4a0b87 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 15:48:06 +0900 Subject: [PATCH 15/44] Always render strategy in deployment template Strategy always has a default value (RollingUpdate), so the conditional with-guard is replaced with unconditional rendering. Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index 3d203c55..e7ee1db6 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -20,10 +20,8 @@ spec: selector: matchLabels: {{- include "http-service.selectorLabels" . | nindent 6 }} - {{- with .Values.deployment.strategy }} strategy: - {{- toYaml . | nindent 4 }} - {{- end }} + {{- toYaml .Values.strategy | nindent 4 }} {{- with .Values.revisionHistoryLimit }} revisionHistoryLimit: {{ . }} {{- end }} From ebed48b8e29dae9f90b67e5b6fa2ca349cc41332 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 15:49:52 +0900 Subject: [PATCH 16/44] Auto-generate Reloader annotation on Deployment When reloader.enabled is true, the template adds reloader.stakater.com/auto: "true" to Deployment annotations for automatic pod restart on ConfigMap/Secret changes. Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index e7ee1db6..d5626475 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -4,10 +4,13 @@ kind: Deployment metadata: name: {{ include "http-service.fullname" . 
}} namespace: {{ .Release.Namespace }} - {{- with .Values.deployment.annotations }} annotations: + {{- if .Values.reloader.enabled }} + reloader.stakater.com/auto: "true" + {{- end }} + {{- with .Values.deployment.annotations }} {{- toYaml . | nindent 4 }} - {{- end }} + {{- end }} labels: {{- include "http-service.labels" . | nindent 4 }} {{- with .Values.deployment.labels }} From 86a37224eaecca28c0c7cef1a1bba129eb6ac516 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 15:54:00 +0900 Subject: [PATCH 17/44] Auto-generate Datadog unified service tag labels When datadog.enabled is true, tags.datadoghq.com/{env,service,version} labels are added to both Deployment metadata and Pod template metadata. All three values are validated with required to fail early on missing configuration. Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index d5626475..2c46d600 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -13,6 +13,11 @@ metadata: {{- end }} labels: {{- include "http-service.labels" . | nindent 4 }} + {{- if .Values.datadog.enabled }} + tags.datadoghq.com/env: {{ required "datadog.env is required when datadog.enabled is true" .Values.datadog.env }} + tags.datadoghq.com/service: {{ required "datadog.service is required when datadog.enabled is true" .Values.datadog.service }} + tags.datadoghq.com/version: {{ required "datadog.version is required when datadog.enabled is true" .Values.datadog.version | quote }} + {{- end }} {{- with .Values.deployment.labels }} {{- toYaml . | nindent 4 }} {{- end }} @@ -38,6 +43,11 @@ spec: {{- end }} labels: {{- include "http-service.selectorLabels" . 
| nindent 8 }} + {{- if .Values.datadog.enabled }} + tags.datadoghq.com/env: {{ required "datadog.env is required when datadog.enabled is true" .Values.datadog.env }} + tags.datadoghq.com/service: {{ required "datadog.service is required when datadog.enabled is true" .Values.datadog.service }} + tags.datadoghq.com/version: {{ required "datadog.version is required when datadog.enabled is true" .Values.datadog.version | quote }} + {{- end }} {{- with .Values.pod.labels }} {{- toYaml . | nindent 8 }} {{- end }} From 5e8e4b93dd586bd85478cac3d93d84b565bdc746 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 15:55:51 +0900 Subject: [PATCH 18/44] Replace containers loop with single-container definition Remove the generic containers range loop in favor of a single main container referencing top-level values (image, containerPort, env, etc). tpl calls removed for simplicity. extraContainers appended after the main container as an escape hatch for sidecars. Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 59 +++++++++++--------------- 1 file changed, 25 insertions(+), 34 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index 2c46d600..be4363a3 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -152,61 +152,52 @@ spec: {{- end }} {{- end }} containers: - {{- range $container := .Values.containers }} - - name: {{ include "http-service.fullname" $root }}-{{ $container.name }} - {{- with $container.securityContext }} + - name: {{ include "http-service.fullname" . }} + image: {{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- with .Values.securityContext }} securityContext: {{- toYaml . | nindent 12 }} {{- end }} - image: {{ $container.image.repository }}:{{ $container.image.tag }} - {{- with $container.image.pullPolicy }} - imagePullPolicy: {{ . 
}} - {{- end }} - {{- with $container.ports }} ports: - {{- tpl (. | toYaml) $root | nindent 12 }} - {{- end }} - {{- with $container.command }} + - containerPort: {{ .Values.containerPort }} + protocol: TCP + {{- with .Values.command }} command: {{- toYaml . | nindent 12 }} {{- end }} - {{- with $container.args }} + {{- with .Values.args }} args: {{- toYaml . | nindent 12 }} {{- end }} - {{- with $container.workingDir }} - workingDir: {{ . }} - {{- end }} - {{- with $container.env }} + {{- with .Values.env }} env: - {{- tpl (. | toYaml) $root | nindent 12 }} + {{- toYaml . | nindent 12 }} {{- end }} - {{- with $container.envFrom }} + {{- with .Values.envFrom }} envFrom: - {{- tpl (. | toYaml) $root | nindent 12 }} - {{- end }} - {{- with $container.lifecycle }} - lifecycle: {{- toYaml . | nindent 12 }} {{- end }} - {{- with $container.startupProbe }} + {{- with .Values.startupProbe }} startupProbe: - {{- tpl (. | toYaml) $root | nindent 12 }} - {{- end }} - {{- with $container.readinessProbe }} - readinessProbe: - {{- tpl (. | toYaml) $root | nindent 12 }} + {{- toYaml . | nindent 12 }} {{- end }} - {{- with $container.livenessProbe }} + {{- with .Values.livenessProbe }} livenessProbe: - {{- tpl (. | toYaml) $root | nindent 12 }} + {{- toYaml . | nindent 12 }} {{- end }} - {{- with $container.resources }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.resources }} resources: {{- toYaml . | nindent 12 }} {{- end }} - {{- with $container.volumeMounts }} + {{- with .Values.volumeMounts }} volumeMounts: - {{- tpl (. | toYaml) $root | nindent 12 }} + {{- toYaml . | nindent 12 }} {{- end }} - {{- end }} + {{- with .Values.extraContainers }} + {{- toYaml . 
| nindent 8 }} + {{- end }} From 65c3f9d651f6dfb431d8716ae016fd86c4098d15 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 16:10:48 +0900 Subject: [PATCH 19/44] Redesign gracefulShutdown with explicit drain and app timeout values Split into trafficDrainSeconds (preStop sleep for LB deregistration) and appShutdownTimeoutSeconds (time the app gets after SIGTERM). terminationGracePeriodSeconds is auto-calculated as their sum. Added inline documentation explaining the preStop sleep rationale. Co-Authored-By: Claude Opus 4.6 --- http-service/values.yaml | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/http-service/values.yaml b/http-service/values.yaml index 022af5a7..18d90942 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -72,10 +72,23 @@ image: containerPort: 8080 # port exposed by the main container # -- Graceful shutdown configuration (required). -# The template uses this to set terminationGracePeriodSeconds and -# generate a preStop lifecycle hook (sleep for deregistration delay). +# The template auto-generates: +# - preStop lifecycle hook: sleep for trafficDrainSeconds +# - terminationGracePeriodSeconds: trafficDrainSeconds + appShutdownTimeoutSeconds +# +# Timeline: +# |-- terminationGracePeriodSeconds (drain + app) --| +# |-- preStop sleep (drain) --|-- app shutdown ------| +# ^ +# SIGTERM +# +# Why preStop sleep? When a Pod terminates, Kubernetes removes it from +# Endpoints and runs preStop simultaneously. But the ALB/NLB target group +# deregistration takes time — during that gap, traffic still arrives. +# The sleep holds the container alive until routing fully stops. 
#gracefulShutdown: -# seconds: 30 +# trafficDrainSeconds: 25 # wait for LB deregistration to complete +# appShutdownTimeoutSeconds: 30 # time the app gets after SIGTERM command: [] # container command override args: [] # container args override From 399401e842e51c57db02477edccc699bfe546dcb Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 16:15:48 +0900 Subject: [PATCH 20/44] Add gracefulShutdown with terminationGracePeriodSeconds and preStop hook Both gracefulShutdown values are required: - trafficDrainSeconds: preStop sleep waiting for LB deregistration - appShutdownTimeoutSeconds: time the app gets after SIGTERM terminationGracePeriodSeconds is auto-calculated as their sum. Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index be4363a3..47b0d7aa 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -97,9 +97,9 @@ spec: {{- with .Values.priorityClassName }} priorityClassName: {{ . }} {{- end }} - {{- with .Values.terminationGracePeriodSeconds }} - terminationGracePeriodSeconds: {{ . }} - {{- end }} + {{- $drainSeconds := required "gracefulShutdown.trafficDrainSeconds is required" .Values.gracefulShutdown.trafficDrainSeconds }} + {{- $appSeconds := required "gracefulShutdown.appShutdownTimeoutSeconds is required" .Values.gracefulShutdown.appShutdownTimeoutSeconds }} + terminationGracePeriodSeconds: {{ add $drainSeconds $appSeconds }} {{- with .Values.progressDeadlineSeconds }} progressDeadlineSeconds: {{ . }} {{- end }} @@ -178,6 +178,12 @@ spec: envFrom: {{- toYaml . | nindent 12 }} {{- end }} + lifecycle: + preStop: + exec: + command: + - sleep + - {{ $drainSeconds | quote }} {{- with .Values.startupProbe }} startupProbe: {{- toYaml . 
| nindent 12 }} From 78457cf6c9a72a3dc46b86d1933046a9265e13e1 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 16:16:43 +0900 Subject: [PATCH 21/44] Make startupProbe and livenessProbe required Template rendering fails with a clear error message if either probe is not configured. readinessProbe remains optional. Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index 47b0d7aa..c75aff10 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -184,14 +184,10 @@ spec: command: - sleep - {{ $drainSeconds | quote }} - {{- with .Values.startupProbe }} startupProbe: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with .Values.livenessProbe }} + {{- toYaml (required "startupProbe is required" .Values.startupProbe) | nindent 12 }} livenessProbe: - {{- toYaml . | nindent 12 }} - {{- end }} + {{- toYaml (required "livenessProbe is required" .Values.livenessProbe) | nindent 12 }} {{- with .Values.readinessProbe }} readinessProbe: {{- toYaml . | nindent 12 }} From dd55293c61d0150f8d9022740dc50a007707889f Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 16:23:36 +0900 Subject: [PATCH 22/44] Simplify affinity to plain passthrough, remove nodeAffinity auto-generation The chart passes affinity config through as-is via toYaml. Concrete node affinity rules (e.g. ARM node groups) are injected by Helmfile settings, keeping the chart vendor-neutral and simple. 
Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 2 +- http-service/values.yaml | 10 +--------- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index c75aff10..8e63ff7f 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -85,7 +85,7 @@ spec: {{- end }} {{- with .Values.affinity }} affinity: - {{- tpl (. | toYaml) $root | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.tolerations }} tolerations: diff --git a/http-service/values.yaml b/http-service/values.yaml index 18d90942..5355f9150 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -41,15 +41,7 @@ pod: labels: {} securityContext: {} -affinity: {} # affinity - -# -- Node affinity for targeting specific node groups. -# Set key and value to generate a -# requiredDuringSchedulingIgnoredDuringExecution node affinity rule. -# Concrete defaults (e.g. ARM nodes) are set in Helmfile settings. -#nodeAffinity: -# key: "" -# value: "" +affinity: {} # affinity (e.g. node affinity for ARM, set via Helmfile settings) nodeSelector: {} # nodeSelector From 563b6b66a4699ecce3b7c46f89f423bfee0beb4b Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 16:32:46 +0900 Subject: [PATCH 23/44] Remove unused Pod spec fields from deployment template MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove hostNetwork, dnsPolicy, dnsConfig, shareProcessNamespace, and restartPolicy — none are needed for standard HTTP services. 
Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index 8e63ff7f..5e1e60ab 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -52,20 +52,6 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} spec: - {{- if .Values.useHostNetwork }} - hostNetwork: {{ .Values.useHostNetwork }} - dnsPolicy: ClusterFirstWithHostNet - {{- end }} - {{- if and (not .Values.useHostNetwork) .Values.dnsPolicy }} - dnsPolicy: {{ .Values.dnsPolicy }} - {{- end }} - {{- with .Values.shareProcessNamespace }} - shareProcessNamespace: {{ . }} - {{- end }} - {{- with .Values.dnsConfig }} - dnsConfig: - {{- toYaml . | indent 8 }} - {{- end }} {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- tpl (. | toYaml) $root | nindent 8 }} @@ -91,9 +77,6 @@ spec: tolerations: {{- tpl (. | toYaml) $root | nindent 8 }} {{- end }} - {{- with .Values.restartPolicy }} - restartPolicy: {{ . }} - {{- end }} {{- with .Values.priorityClassName }} priorityClassName: {{ . }} {{- end }} From fc37447ea2cd086ddecd3bab727825161d07a8d1 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 16:37:18 +0900 Subject: [PATCH 24/44] Replace tpl calls with plain toYaml in deployment template Remove tpl wrapper for simplicity. Template expressions within values are not needed for this opinionated chart. Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index 5e1e60ab..f51df3c4 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -54,15 +54,15 @@ spec: spec: {{- with .Values.imagePullSecrets }} imagePullSecrets: - {{- tpl (. 
| toYaml) $root | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.nodeSelector }} nodeSelector: - {{- tpl (. | toYaml) $root | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.readinessGates }} readinessGates: - {{- tpl (. | toYaml) $root | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} serviceAccountName: {{ include "http-service.serviceAccountName" . }} {{- with .Values.pod.securityContext }} @@ -75,7 +75,7 @@ spec: {{- end }} {{- with .Values.tolerations }} tolerations: - {{- tpl (. | toYaml) $root | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.priorityClassName }} priorityClassName: {{ . }} @@ -99,7 +99,7 @@ spec: {{- end }} {{- with .Values.volumes }} volumes: - {{- tpl (. | toYaml) $root | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- if .Values.initContainers.enabled }} initContainers: @@ -122,15 +122,15 @@ spec: {{- end }} {{- with $container.env }} env: - {{- tpl (. | toYaml) $root | nindent 12 }} + {{- toYaml . | nindent 12 }} {{- end }} {{- with $container.envFrom }} envFrom: - {{- tpl (. | toYaml) $root | nindent 12 }} + {{- toYaml . | nindent 12 }} {{- end }} {{- with $container.volumeMounts }} volumeMounts: - {{- tpl (. | toYaml) $root | nindent 12 }} + {{- toYaml . | nindent 12 }} {{- end }} {{- end }} {{- end }} From 0b38146372f392256ba676b1779d02745ec9df6b Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 16:51:56 +0900 Subject: [PATCH 25/44] Simplify topologySpreadConstraints to plain passthrough Replace the range loop with auto-injected selectorLabels with a simple toYaml passthrough. Users provide the full constraint spec including labelSelector, giving more flexibility. 
Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index f51df3c4..33993c9c 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -86,16 +86,9 @@ spec: {{- with .Values.progressDeadlineSeconds }} progressDeadlineSeconds: {{ . }} {{- end }} - {{- if .Values.topologySpreadConstraints }} + {{- with .Values.topologySpreadConstraints }} topologySpreadConstraints: - {{- range .Values.topologySpreadConstraints }} - - maxSkew: {{ .maxSkew }} - topologyKey: {{ .topologyKey }} - whenUnsatisfiable: {{ .whenUnsatisfiable }} - labelSelector: - matchLabels: - {{- include "http-service.selectorLabels" $root | nindent 14 }} - {{- end }} + {{- toYaml . | nindent 8 }} {{- end }} {{- with .Values.volumes }} volumes: From 47d2b9f114e06c454d2504fdf4c706b65262cd48 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 16:54:42 +0900 Subject: [PATCH 26/44] Add tolerations, topologySpreadConstraints, and extraPodSpec All three are passthrough fields: - tolerations: for tainted node groups - topologySpreadConstraints: for pod spread across topology domains - extraPodSpec: escape hatch for arbitrary Pod spec fields Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 3 +++ http-service/values.yaml | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index 33993c9c..1faed865 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -94,6 +94,9 @@ spec: volumes: {{- toYaml . | nindent 8 }} {{- end }} + {{- with .Values.extraPodSpec }} + {{- toYaml . 
| nindent 6 }} + {{- end }} {{- if .Values.initContainers.enabled }} initContainers: {{- range $container := .Values.initContainers.containers }} diff --git a/http-service/values.yaml b/http-service/values.yaml index 5355f9150..c220a8a1 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -43,6 +43,10 @@ pod: affinity: {} # affinity (e.g. node affinity for ARM, set via Helmfile settings) +tolerations: [] # tolerations for tainted nodes + +topologySpreadConstraints: [] # pod spread across topology domains + nodeSelector: {} # nodeSelector imagePullSecrets: [] # imagePullSecrets @@ -55,6 +59,8 @@ progressDeadlineSeconds: "" # progressDeadlineSeconds volumes: [] # pod volumes(initContainers, containers) +extraPodSpec: {} # arbitrary additional Pod spec fields (escape hatch) + # -- Main container image image: repository: "" From 5055d8d5be5eb4f4fd9c2ea1b7934a616b6fc164 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 16:57:46 +0900 Subject: [PATCH 27/44] Rename slime to http-service in Service template Co-Authored-By: Claude Opus 4.6 --- http-service/templates/service.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/http-service/templates/service.yaml b/http-service/templates/service.yaml index eefb704c..7ffd6081 100644 --- a/http-service/templates/service.yaml +++ b/http-service/templates/service.yaml @@ -3,14 +3,14 @@ apiVersion: v1 kind: Service metadata: - name: {{ include "slime.fullname" . }} + name: {{ include "http-service.fullname" . }} namespace: {{ .Release.Namespace }} {{- with .Values.service.annotations }} annotations: {{- toYaml . | nindent 4 }} {{- end }} labels: - {{- include "slime.labels" . | nindent 4 }} + {{- include "http-service.labels" . | nindent 4 }} {{- with .Values.service.labels }} {{- toYaml . | nindent 4 }} {{- end }} @@ -30,5 +30,5 @@ spec: protocol: {{ $v.protocol }} {{- end }} selector: - {{- include "slime.selectorLabels" . 
| nindent 4 }} + {{- include "http-service.selectorLabels" . | nindent 4 }} {{- end }} From 53d21390b5b47291233164f8a6b6c859f1e80187 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 17:13:26 +0900 Subject: [PATCH 28/44] Revert ingress to generic map structure, rename to http-service Replace the private/public split with the original ingresses map. Private vs public is an infrastructure concern handled by Helmfile, not the chart. The chart stays vendor-neutral. Co-Authored-By: Claude Opus 4.6 --- http-service/templates/ingress.yaml | 4 +-- http-service/values.yaml | 39 +++++++++++------------------ 2 files changed, 16 insertions(+), 27 deletions(-) diff --git a/http-service/templates/ingress.yaml b/http-service/templates/ingress.yaml index 6a814b52..9dbb0b9f 100644 --- a/http-service/templates/ingress.yaml +++ b/http-service/templates/ingress.yaml @@ -1,7 +1,7 @@ {{- if .Values.ingress.enabled }} {{- $root := . }} {{- range $name, $ingress := .Values.ingress.ingresses }} -{{- $fullName := include "slime.fullname" $root -}} +{{- $fullName := include "http-service.fullname" $root -}} --- apiVersion: networking.k8s.io/v1 kind: Ingress @@ -11,7 +11,7 @@ metadata: {{- toYaml . | nindent 4 }} {{- end }} labels: - {{- include "slime.labels" $root | nindent 4 }} + {{- include "http-service.labels" $root | nindent 4 }} {{- with $ingress.labels }} {{- toYaml . 
| nindent 4 }} {{- end }} diff --git a/http-service/values.yaml b/http-service/values.yaml index c220a8a1..790ceb86 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -215,31 +215,20 @@ podDisruptionBudget: minAvailable: # podDisruptionBudget minAvailable ingress: - # -- Private ingress (internal ALB) - private: - enabled: false - annotations: {} - labels: {} - hosts: [] - # - host: api.internal.example.com - # paths: - # - path: "/*" - # pathType: ImplementationSpecific - # portNumber: 80 - tls: [] - - # -- Public ingress (internet-facing ALB) - public: - enabled: false - annotations: {} - labels: {} - hosts: [] - # - host: api.example.com - # paths: - # - path: "/*" - # pathType: ImplementationSpecific - # portNumber: 80 - tls: [] + enabled: false # if true, you can use ingress + ingresses: {} # name -> spec map, create as many Ingress resources as needed + # your-ingress-name: + # annotations: {} + # labels: {} + # tls: [] + # - hosts: [] + # secretName: "" + # hosts: [] + # - host: + # paths: + # - path: "/*" + # pathType: ImplementationSpecific + # portNumber: 80 test: enabled: false # if true, you can use helm test From 9a8542d8d17652e1380d8921988e555f9ccf0cd8 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 17:31:59 +0900 Subject: [PATCH 29/44] Rename slime to http-service in PDB template, drop v1beta1 compat Remove the semverCompare branch for policy/v1beta1 which was removed in Kubernetes 1.25. Use policy/v1 unconditionally. 
Co-Authored-By: Claude Opus 4.6 --- http-service/templates/pdb.yaml | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/http-service/templates/pdb.yaml b/http-service/templates/pdb.yaml index 5deb9526..153c90ea 100644 --- a/http-service/templates/pdb.yaml +++ b/http-service/templates/pdb.yaml @@ -1,9 +1,5 @@ {{- if .Values.podDisruptionBudget.enabled }} -{{- if semverCompare ">=1.21-0" .Capabilities.KubeVersion.GitVersion }} apiVersion: policy/v1 -{{- else }} -apiVersion: policy/v1beta1 -{{- end }} kind: PodDisruptionBudget metadata: {{- with .Values.podDisruptionBudget.annotations }} @@ -11,11 +7,11 @@ metadata: {{- toYaml . | nindent 4}} {{- end }} labels: - {{- include "slime.labels" . | nindent 4 }} + {{- include "http-service.labels" . | nindent 4 }} {{- with .Values.podDisruptionBudget.labels }} {{- toYaml . | nindent 4 }} {{- end }} - name: {{ template "slime.fullname" . }} + name: {{ template "http-service.fullname" . }} namespace: {{ .Release.Namespace }} spec: {{- with .Values.podDisruptionBudget.maxUnavailable }} @@ -26,5 +22,5 @@ spec: {{- end }} selector: matchLabels: - {{- include "slime.selectorLabels" . | nindent 6 }} + {{- include "http-service.selectorLabels" . 
| nindent 6 }} {{- end }} From f859272c69ad8ebd79f693099e8a59ac38b36073 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 17:37:24 +0900 Subject: [PATCH 30/44] Rename remaining templates, delete cronjob, drop old API version compat - Rename slime to http-service in hpa, rbac, configmap, secret, test - Delete cronjob.yaml (not applicable to HTTP services) - HPA: use autoscaling/v2 unconditionally (v2beta2 removed in k8s 1.26) Co-Authored-By: Claude Opus 4.6 --- http-service/templates/configmap.yaml | 4 +- http-service/templates/cronjob.yaml | 292 ------------------------- http-service/templates/hpa.yaml | 10 +- http-service/templates/rbac.yaml | 28 +-- http-service/templates/secret.yaml | 4 +- http-service/templates/tests/test.yaml | 8 +- 6 files changed, 25 insertions(+), 321 deletions(-) delete mode 100644 http-service/templates/cronjob.yaml diff --git a/http-service/templates/configmap.yaml b/http-service/templates/configmap.yaml index 273e42dc..330ff3de 100644 --- a/http-service/templates/configmap.yaml +++ b/http-service/templates/configmap.yaml @@ -4,10 +4,10 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ include "slime.fullname" $root }}-{{ tpl $name $root }} + name: {{ include "http-service.fullname" $root }}-{{ tpl $name $root }} namespace: {{ $root.Release.Namespace }} labels: - {{- include "slime.labels" $root | nindent 4 }} + {{- include "http-service.labels" $root | nindent 4 }} {{- with $value.labels }} {{- toYaml . | nindent 4 }} {{- end }} diff --git a/http-service/templates/cronjob.yaml b/http-service/templates/cronjob.yaml deleted file mode 100644 index 0db283fa..00000000 --- a/http-service/templates/cronjob.yaml +++ /dev/null @@ -1,292 +0,0 @@ -{{- if .Values.cronJob.enabled -}} -{{- $root := . }} -# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#cronjob-v1-batch -apiVersion: batch/v1 -kind: CronJob -metadata: - name: {{ include "slime.fullname" . 
}} - namespace: {{ .Release.Namespace }} - {{- with .Values.cronJobAnnotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - labels: - {{- include "slime.labels" . | nindent 4 }} - {{- with .Values.cronJobLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} -# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#cronjobspec-v1-batch -spec: - {{- with .Values.concurrencyPolicy }} - concurrencyPolicy: {{ . }} - {{- end }} - {{- with .Values.failedJobsHistoryLimit }} - failedJobsHistoryLimit: {{ . }} - {{- end }} - schedule: "{{ .Values.schedule }}" - {{- with .Values.startingDeadlineSeconds }} - startingDeadlineSeconds: {{ . }} - {{- end }} - {{- with .Values.successfulJobsHistoryLimit }} - successfulJobsHistoryLimit: {{ . }} - {{- end }} - {{- with .Values.suspend }} - suspend: {{ . }} - {{- end }} - # Available in 1.27 or later - # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#cron-job-limitations - {{- with .Values.timeZone }} - timeZone: {{ . }} - {{- end }} - # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#jobtemplatespec-v1-batch - jobTemplate: - metadata: - {{- with .Values.cronJobTemplateAnnotations }} - annotations: - {{- toYaml . | nindent 8 }} - {{- end }} - labels: - {{- include "slime.labels" . | nindent 8 }} - {{- with .Values.cronJobTemplateLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#jobspec-v1-batch - {{- with .Values.activeDeadlineSeconds }} - activeDeadlineSeconds: {{ . }} - {{- end }} - {{- with .Values.backoffLimit }} - backoffLimit: {{ . }} - {{- end }} - {{- with .Values.completionMode }} - completionMode: {{ . }} - {{- end }} - {{- with .Values.completions }} - completions: {{ . }} - {{- end }} - {{- with .Values.manualSelector }} - manualSelector: {{ . }} - {{- end }} - {{- with .Values.parallelism }} - parallelism: {{ . 
}} - {{- end }} - {{- with .Values.jobTemplateSuspend }} - suspend: {{ . }} - {{- end }} - {{- with .Values.ttlSecondsAfterFinished }} - ttlSecondsAfterFinished: {{ . }} - {{- end }} - template: - metadata: - {{- with .Values.cronJobPodAnnotations }} - annotations: - {{- toYaml . | nindent 12 }} - {{- end }} - labels: - {{- include "slime.labels" . | nindent 12 }} - {{- with .Values.cronJobPodLabels }} - {{- toYaml . | nindent 12 }} - {{- end }} - spec: - {{- with .Values.cronJobPodActiveDeadlineSeconds }} - activeDeadlineSeconds: {{ . }} - {{- end }} - {{- with .Values.cronJobAffinity }} - affinity: - {{- tpl (. | toYaml) $root | nindent 14 }} - {{- end }} - {{- with .Values.automountServiceAccountToken }} - automountServiceAccountToken: {{ . }} - {{- end }} - containers: - {{- range $container := .Values.cronJobContainers }} - - name: {{ include "slime.fullname" $root }}-{{ $container.name }} - {{- with $container.securityContext }} - securityContext: - {{- toYaml . | nindent 16 }} - {{- end }} - image: {{ $container.image.repository }}:{{ $container.image.tag }} - {{- with $container.image.pullPolicy }} - imagePullPolicy: {{ . }} - {{- end }} - {{- with $container.ports }} - ports: - {{- tpl (. | toYaml) $root | nindent 16 }} - {{- end }} - {{- with $container.command }} - command: - {{- toYaml . | nindent 16 }} - {{- end }} - {{- with $container.args }} - args: - {{- toYaml . | nindent 16 }} - {{- end }} - {{- with $container.workingDir }} - workingDir: {{ . }} - {{- end }} - {{- with $container.env }} - env: - {{- tpl (. | toYaml) $root | nindent 16 }} - {{- end }} - {{- with $container.envFrom }} - envFrom: - {{- tpl (. | toYaml) $root | nindent 16 }} - {{- end }} - {{- with $container.lifecycle }} - lifecycle: - {{- toYaml . | nindent 16 }} - {{- end }} - {{- with $container.startupProbe }} - startupProbe: - {{- tpl (. | toYaml) $root | nindent 16 }} - {{- end }} - {{- with $container.readinessProbe }} - readinessProbe: - {{- tpl (. 
| toYaml) $root | nindent 16 }} - {{- end }} - {{- with $container.livenessProbe }} - livenessProbe: - {{- tpl (. | toYaml) $root | nindent 16 }} - {{- end }} - {{- with $container.resources }} - resources: - {{- toYaml . | nindent 16 }} - {{- end }} - {{- with $container.volumeMounts }} - volumeMounts: - {{- tpl (. | toYaml) $root | nindent 16 }} - {{- end }} - {{- end }} - {{- with .Values.cronJobDnsConfig }} - dnsConfig: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.cronJobUseHostNetwork }} - hostNetwork: {{ .Values.cronJobUseHostNetwork }} - dnsPolicy: ClusterFirstWithHostNet - {{- end }} - {{- if and (not .Values.cronJobUseHostNetwork) .Values.cronJobDnsPolicy }} - dnsPolicy: {{ .Values.cronJobDnsPolicy }} - {{- end }} - {{- with .Values.cronJobEnableServiceLinks }} - enableServiceLinks: {{ . }} - {{- end }} - {{- with .Values.cronJobHostAliases }} - hostAliases: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with .Values.cronJobHostIPC }} - hostIPC: {{ . }} - {{- end }} - {{- with .Values.cronJobHostPID }} - hostPID: {{ . }} - {{- end }} - {{- with .Values.cronJobHostUsers }} - hostUsers: {{ . }} - {{- end }} - {{- with .Values.cronJobHostname }} - hostname: {{ . }} - {{- end }} - {{- with .Values.cronJobImagePullSecrets }} - imagePullSecrets: - {{- tpl (. | toYaml) $root | nindent 12 }} - {{- end }} - {{- if .Values.cronJobInitContainers.enabled }} - initContainers: - {{- range $container := .Values.cronJobInitContainers.containers }} - - name: {{ include "slime.fullname" $root }}-{{ $container.name }} - image: {{ $container.image.repository }}:{{ $container.image.tag }} - {{- with $container.image.pullPolicy }} - imagePullPolicy: {{ . }} - {{- end }} - {{- with $container.command }} - command: - {{- toYaml . | nindent 14 }} - {{- end }} - {{- with $container.args }} - args: - {{- toYaml . | nindent 14 }} - {{- end }} - {{- with $container.workingDir }} - workingDir: {{ . }} - {{- end }} - {{- with $container.env }} - env: - {{- tpl (. 
| toYaml) $root | nindent 16 }} - {{- end }} - {{- with $container.envFrom }} - envFrom: - {{- tpl (. | toYaml) $root | nindent 14 }} - {{- end }} - {{- with $container.volumeMounts }} - volumeMounts: - {{- tpl (. | toYaml) $root | nindent 14 }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.cronJobNodeName }} - nodeName: {{ . }} - {{- end }} - {{- with .Values.cronJobNodeSelector }} - nodeSelector: - {{- tpl (. | toYaml) $root | nindent 12 }} - {{- end }} - {{- with .Values.cronJobPreemptionPolicy }} - preemptionPolicy: {{ . }} - {{- end }} - {{- with .Values.cronJobPriority }} - priority: {{ . }} - {{- end }} - {{- with .Values.cronJobPriorityClassName }} - priorityClassName: {{ . }} - {{- end }} - {{- if .Values.cronJobRestartPolicy }} - restartPolicy: {{ .Values.cronJobRestartPolicy }} - {{- else }} - restartPolicy: "OnFailure" - {{- end }} - {{- with .Values.cronJobSchedulerName }} - schedulerName: {{ . }} - {{- end }} - serviceAccountName: {{ include "slime.serviceAccountName" . }} - {{- with .Values.cronJobSecurityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with .Values.cronJobSetHostnameAsFQDN }} - setHostnameAsFQDN: {{ . }} - {{- end }} - {{- with .Values.cronJobShareProcessNamespace }} - shareProcessNamespace: {{ . }} - {{- end }} - {{- with .Values.cronJobSubdomain }} - subdomain: {{ . }} - {{- end }} - {{- with .Values.cronJobTerminationGracePeriodSeconds }} - terminationGracePeriodSeconds: {{ . }} - {{- end }} - {{- with .Values.cronJobTolerations }} - tolerations: - {{- tpl (. 
| toYaml) $root | nindent 12 }} - {{- end }} - {{- if .Values.cronJobTopologySpreadConstraints.enabled }} - topologySpreadConstraints: - {{- range $constraint := .Values.cronJobTopologySpreadConstraints.constraints }} - - maxSkew: {{ $constraint.maxSkew }} - topologyKey: {{ $constraint.topologyKey }} - whenUnsatisfiable: {{ $constraint.whenUnsatisfiable }} - labelSelector: - matchLabels: - {{- include "slime.selectorLabels" $root | nindent 18 }} - {{- end }} - {{- end }} - {{- if or (.Values.cronJobVolumes) (.Values.extraCronJobVolumes) }} - volumes: - {{- with .Values.cronJobVolumes }} - {{- tpl (. | toYaml) $root | nindent 12 }} - {{- end }} - {{- with .Values.extraCronJobVolumes }} - {{- tpl (. | toYaml) $root | nindent 12 }} - {{- end }} - {{- end }} -{{- end }} diff --git a/http-service/templates/hpa.yaml b/http-service/templates/hpa.yaml index 2a89d6be..1581b04b 100644 --- a/http-service/templates/hpa.yaml +++ b/http-service/templates/hpa.yaml @@ -1,16 +1,12 @@ {{- $root := . -}} {{- if .Values.autoscaling.enabled -}} -{{- if semverCompare ">=1.23-0" .Capabilities.KubeVersion.GitVersion }} apiVersion: autoscaling/v2 -{{- else }} -apiVersion: autoscaling/v2beta2 -{{- end }} kind: HorizontalPodAutoscaler metadata: - name: {{ include "slime.fullname" . }} + name: {{ include "http-service.fullname" . }} namespace: {{ .Release.Namespace }} labels: - {{- include "slime.labels" . | nindent 4 }} + {{- include "http-service.labels" . | nindent 4 }} spec: {{- with .Values.autoscaling.behavior }} behavior: @@ -21,7 +17,7 @@ spec: scaleTargetRef: apiVersion: apps/v1 kind: Deployment - name: {{ include "slime.fullname" . }} + name: {{ include "http-service.fullname" . 
}} minReplicas: {{ .Values.autoscaling.minReplicas }} maxReplicas: {{ .Values.autoscaling.maxReplicas }} {{- end }} diff --git a/http-service/templates/rbac.yaml b/http-service/templates/rbac.yaml index c4f9f7b9..e70c3861 100644 --- a/http-service/templates/rbac.yaml +++ b/http-service/templates/rbac.yaml @@ -3,8 +3,8 @@ kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: labels: - {{- include "slime.labels" . | nindent 4 }} - name: {{ include "slime.fullname" . }} + {{- include "http-service.labels" . | nindent 4 }} + name: {{ include "http-service.fullname" . }} rules: {{- with .Values.clusterRole.rules }} {{- toYaml . | nindent 2 }} @@ -14,15 +14,15 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: labels: - {{- include "slime.labels" . | nindent 4 }} - name: {{ include "slime.fullname" . }} + {{- include "http-service.labels" . | nindent 4 }} + name: {{ include "http-service.fullname" . }} roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: {{ include "slime.fullname" . }} + name: {{ include "http-service.fullname" . }} subjects: - kind: ServiceAccount - name: {{ include "slime.serviceAccountName" . }} + name: {{ include "http-service.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- {{- end }} @@ -31,8 +31,8 @@ kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: labels: - {{- include "slime.labels" . | nindent 4 }} - name: {{ include "slime.fullname" . }} + {{- include "http-service.labels" . | nindent 4 }} + name: {{ include "http-service.fullname" . }} namespace: {{ .Release.Namespace }} rules: {{- with .Values.role.rules }} @@ -43,16 +43,16 @@ kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: labels: - {{- include "slime.labels" . | nindent 4 }} - name: {{ include "slime.fullname" . }} + {{- include "http-service.labels" . | nindent 4 }} + name: {{ include "http-service.fullname" . 
}} namespace: {{ .Release.Namespace }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: {{ include "slime.fullname" . }} + name: {{ include "http-service.fullname" . }} subjects: - kind: ServiceAccount - name: {{ include "slime.serviceAccountName" . }} + name: {{ include "http-service.serviceAccountName" . }} namespace: {{ .Release.Namespace }} --- {{- end }} @@ -63,10 +63,10 @@ metadata: annotations: {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} labels: - {{- include "slime.labels" . | nindent 4 }} + {{- include "http-service.labels" . | nindent 4 }} {{- with .Values.serviceAccount.labels }} {{- toYaml . | nindent 4 }} {{- end }} - name: {{ include "slime.serviceAccountName" . }} + name: {{ include "http-service.serviceAccountName" . }} namespace: {{ .Release.Namespace }} {{- end -}} diff --git a/http-service/templates/secret.yaml b/http-service/templates/secret.yaml index 43a7e5d3..44429105 100644 --- a/http-service/templates/secret.yaml +++ b/http-service/templates/secret.yaml @@ -4,10 +4,10 @@ apiVersion: v1 kind: Secret metadata: - name: {{ include "slime.fullname" $root }}-{{ tpl $name $root }} + name: {{ include "http-service.fullname" $root }}-{{ tpl $name $root }} namespace: {{ $root.Release.Namespace }} labels: - {{- include "slime.labels" $root | nindent 4 }} + {{- include "http-service.labels" $root | nindent 4 }} {{- with $value.labels }} {{- toYaml . | nindent 4 }} {{- end }} diff --git a/http-service/templates/tests/test.yaml b/http-service/templates/tests/test.yaml index 208374e6..72406369 100644 --- a/http-service/templates/tests/test.yaml +++ b/http-service/templates/tests/test.yaml @@ -3,17 +3,17 @@ apiVersion: v1 kind: Pod metadata: - name: "{{ include "slime.fullname" . }}-test" + name: "{{ include "http-service.fullname" . }}-test" namespace: {{ .Release.Namespace }} labels: - helm.sh/chart: {{ include "slime.chart" . }} + helm.sh/chart: {{ include "http-service.chart" . 
}} app.kubernetes.io/managed-by: {{ .Release.Service }} annotations: "helm.sh/hook": test spec: containers: {{- range $container := .Values.test.containers }} - - name: {{ include "slime.fullname" $root }}-{{ $container.name }}-test + - name: {{ include "http-service.fullname" $root }}-{{ $container.name }}-test image: {{ $container.image.repository }}:{{ $container.image.tag }} {{- with $container.image.pullPolicy }} imagePullPolicy: {{ . }} @@ -28,7 +28,7 @@ spec: - name: NAMESPACE value: {{ $root.Release.Namespace }} - name: APP_FULLNAME - value: {{ include "slime.fullname" $root }} + value: {{ include "http-service.fullname" $root }} {{- with $container.env }} {{- toYaml $container.env | nindent 8 }} {{- end }} From 3e92ff696cb6f7257e37f8f4562b87f876a19528 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 18:05:07 +0900 Subject: [PATCH 31/44] Redesign autoscaling with type-based switching (none/hpa/keda) Replace the boolean enabled flag with a type selector. Default is none (fixed replicas). HPA and KEDA sections are commented out and become required when their type is selected. minReplicas/maxReplicas also commented out since they are only needed when autoscaling is active. Co-Authored-By: Claude Opus 4.6 --- http-service/values.yaml | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/http-service/values.yaml b/http-service/values.yaml index 790ceb86..03247acf 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -162,11 +162,23 @@ secrets: {} # transform Secret's manifest. 
You can set `data`, `stringData` and # key: base64-encoded-value autoscaling: - enabled: false # if true, you can use hpa - behavior: {} # autscaling behavior https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#configurable-scaling-behavior - metrics: [] # autoscaling metrics - maxReplicas: 2 # autoscaling maxReplicas - minReplicas: 1 # autoscaling minReplicas + # type: none - fixed replicas, no autoscaler + # type: hpa - Kubernetes-native HorizontalPodAutoscaler + # type: keda - KEDA ScaledObject (manages HPA internally) + type: none + #minReplicas: 1 + #maxReplicas: 10 + + # -- HPA-specific (required when type: hpa) + #hpa: + # metrics: [] + # behavior: {} + + # -- KEDA-specific (required when type: keda) + #keda: + # triggers: [] + # pollingInterval: 30 + # cooldownPeriod: 300 service: enabled: false # if true, you can use service From 368cf181ff2e756cfda09e8a8ba9b6cc70505ebd Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 18:06:17 +0900 Subject: [PATCH 32/44] Rewrite HPA template with type-based autoscaling and validation Validates autoscaling.type is one of none/hpa/keda. When type is hpa, renders HPA with required metrics and minReplicas/maxReplicas. behavior is optional. Co-Authored-By: Claude Opus 4.6 --- http-service/templates/hpa.yaml | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/http-service/templates/hpa.yaml b/http-service/templates/hpa.yaml index 1581b04b..1f6b35c0 100644 --- a/http-service/templates/hpa.yaml +++ b/http-service/templates/hpa.yaml @@ -1,5 +1,7 @@ -{{- $root := . -}} -{{- if .Values.autoscaling.enabled -}} +{{- if not (has .Values.autoscaling.type (list "none" "hpa" "keda")) }} +{{- fail "autoscaling.type must be one of: none, hpa, keda" }} +{{- end }} +{{- if eq .Values.autoscaling.type "hpa" }} apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler metadata: @@ -8,16 +10,16 @@ metadata: labels: {{- include "http-service.labels" . 
| nindent 4 }} spec: - {{- with .Values.autoscaling.behavior }} + {{- with (required "autoscaling.hpa.behavior is required when type is hpa" .Values.autoscaling.hpa).behavior }} behavior: {{- toYaml . | nindent 4 }} {{- end }} metrics: - {{- toYaml .Values.autoscaling.metrics | nindent 4 }} + {{- toYaml (required "autoscaling.hpa.metrics is required when type is hpa" .Values.autoscaling.hpa.metrics) | nindent 4 }} scaleTargetRef: apiVersion: apps/v1 kind: Deployment name: {{ include "http-service.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} + minReplicas: {{ required "autoscaling.minReplicas is required" .Values.autoscaling.minReplicas }} + maxReplicas: {{ required "autoscaling.maxReplicas is required" .Values.autoscaling.maxReplicas }} {{- end }} From 18785fa8c4eec607347f160d731415458b2f1caa Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 18:09:23 +0900 Subject: [PATCH 33/44] Add KEDA ScaledObject template for type keda autoscaling When autoscaling.type is keda, renders a ScaledObject with required triggers and minReplicaCount/maxReplicaCount. pollingInterval and cooldownPeriod are optional. Co-Authored-By: Claude Opus 4.6 --- http-service/templates/scaled-object.yaml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 http-service/templates/scaled-object.yaml diff --git a/http-service/templates/scaled-object.yaml b/http-service/templates/scaled-object.yaml new file mode 100644 index 00000000..ccacb7f4 --- /dev/null +++ b/http-service/templates/scaled-object.yaml @@ -0,0 +1,22 @@ +{{- if eq .Values.autoscaling.type "keda" }} +apiVersion: keda.sh/v1alpha1 +kind: ScaledObject +metadata: + name: {{ include "http-service.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "http-service.labels" . | nindent 4 }} +spec: + scaleTargetRef: + name: {{ include "http-service.fullname" . 
}} + minReplicaCount: {{ required "autoscaling.minReplicas is required" .Values.autoscaling.minReplicas }} + maxReplicaCount: {{ required "autoscaling.maxReplicas is required" .Values.autoscaling.maxReplicas }} + {{- with (required "autoscaling.keda is required when type is keda" .Values.autoscaling.keda).pollingInterval }} + pollingInterval: {{ . }} + {{- end }} + {{- with .Values.autoscaling.keda.cooldownPeriod }} + cooldownPeriod: {{ . }} + {{- end }} + triggers: + {{- toYaml (required "autoscaling.keda.triggers is required when type is keda" .Values.autoscaling.keda.triggers) | nindent 4 }} +{{- end }} From d206edb4e26cd947b7461bfdecb7fdfaa70c7667 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 18:09:57 +0900 Subject: [PATCH 34/44] Update deployment replicas condition for type-based autoscaling Only set fixed replicas when autoscaling.type is none. When hpa or keda is active, the autoscaler manages replica count. Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index 1faed865..49d8b2c5 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -22,7 +22,7 @@ metadata: {{- toYaml . | nindent 4 }} {{- end }} spec: - {{- if and (not .Values.autoscaling.enabled) .Values.replicas }} + {{- if and (eq .Values.autoscaling.type "none") .Values.replicas }} replicas: {{ .Values.replicas }} {{- end }} selector: From 6c97870a950c7c8ff9381d0fb487dd8936d9d349 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 18:12:37 +0900 Subject: [PATCH 35/44] Validate replicas and autoscaling are mutually exclusive Fail template rendering if replicas is set while autoscaling.type is hpa or keda. Update replicas comment to reflect type-based autoscaling. 
Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 7 +++++-- http-service/values.yaml | 4 ++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index 49d8b2c5..6b5549ca 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -22,8 +22,11 @@ metadata: {{- toYaml . | nindent 4 }} {{- end }} spec: - {{- if and (eq .Values.autoscaling.type "none") .Values.replicas }} - replicas: {{ .Values.replicas }} + {{- if and (ne .Values.autoscaling.type "none") .Values.replicas }} + {{- fail "replicas should not be set when autoscaling.type is hpa or keda — the autoscaler manages replica count" }} + {{- end }} + {{- with .Values.replicas }} + replicas: {{ . }} {{- end }} selector: matchLabels: diff --git a/http-service/values.yaml b/http-service/values.yaml index 03247acf..a958ec5c 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -21,8 +21,8 @@ deployment: annotations: {} labels: {} -# If you want fix replicaCount, disable autoscaling.enabled and uncomment the following line -#replicas: 1 # replicas for deployment +# -- Fixed replica count (used when autoscaling.type is none) +#replicas: 1 revisionHistoryLimit: "" # revisionHistoryLimit From 956aad506f8472814844586005d14175f375b63f Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Thu, 5 Mar 2026 18:14:32 +0900 Subject: [PATCH 36/44] Add Argo Rollouts support with HPA/KEDA scaleTargetRef switching New rollout.yaml template creates a Rollout resource using workloadRef to reference the existing Deployment. When rollout.enabled is true, HPA and KEDA scaleTargetRef automatically switch from Deployment to Rollout. rollout.strategy is required when enabled. 
Co-Authored-By: Claude Opus 4.6 --- http-service/templates/hpa.yaml | 5 +++++ http-service/templates/rollout.yaml | 22 ++++++++++++++++++++++ http-service/templates/scaled-object.yaml | 7 +++++++ http-service/values.yaml | 10 ++++++++++ 4 files changed, 44 insertions(+) create mode 100644 http-service/templates/rollout.yaml diff --git a/http-service/templates/hpa.yaml b/http-service/templates/hpa.yaml index 1f6b35c0..725f20f3 100644 --- a/http-service/templates/hpa.yaml +++ b/http-service/templates/hpa.yaml @@ -17,8 +17,13 @@ spec: metrics: {{- toYaml (required "autoscaling.hpa.metrics is required when type is hpa" .Values.autoscaling.hpa.metrics) | nindent 4 }} scaleTargetRef: + {{- if .Values.rollout.enabled }} + apiVersion: argoproj.io/v1alpha1 + kind: Rollout + {{- else }} apiVersion: apps/v1 kind: Deployment + {{- end }} name: {{ include "http-service.fullname" . }} minReplicas: {{ required "autoscaling.minReplicas is required" .Values.autoscaling.minReplicas }} maxReplicas: {{ required "autoscaling.maxReplicas is required" .Values.autoscaling.maxReplicas }} diff --git a/http-service/templates/rollout.yaml b/http-service/templates/rollout.yaml new file mode 100644 index 00000000..90278bd0 --- /dev/null +++ b/http-service/templates/rollout.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rollout.enabled }} +apiVersion: argoproj.io/v1alpha1 +kind: Rollout +metadata: + name: {{ include "http-service.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "http-service.labels" . | nindent 4 }} + {{- with .Values.deployment.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + workloadRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "http-service.fullname" . }} + {{- with .Values.revisionHistoryLimit }} + revisionHistoryLimit: {{ . 
}} + {{- end }} + strategy: + {{- toYaml (required "rollout.strategy is required when rollout.enabled is true" .Values.rollout.strategy) | nindent 4 }} +{{- end }} diff --git a/http-service/templates/scaled-object.yaml b/http-service/templates/scaled-object.yaml index ccacb7f4..7bdb65c7 100644 --- a/http-service/templates/scaled-object.yaml +++ b/http-service/templates/scaled-object.yaml @@ -8,6 +8,13 @@ metadata: {{- include "http-service.labels" . | nindent 4 }} spec: scaleTargetRef: + {{- if .Values.rollout.enabled }} + apiVersion: argoproj.io/v1alpha1 + kind: Rollout + {{- else }} + apiVersion: apps/v1 + kind: Deployment + {{- end }} name: {{ include "http-service.fullname" . }} minReplicaCount: {{ required "autoscaling.minReplicas is required" .Values.autoscaling.minReplicas }} maxReplicaCount: {{ required "autoscaling.maxReplicas is required" .Values.autoscaling.maxReplicas }} diff --git a/http-service/values.yaml b/http-service/values.yaml index a958ec5c..f54acf1c 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -180,6 +180,16 @@ autoscaling: # pollingInterval: 30 # cooldownPeriod: 300 +# -- Argo Rollouts integration. +# When enabled, creates a Rollout resource that references the Deployment +# via workloadRef for canary/blue-green deployment strategies. +rollout: + enabled: false + #strategy: + # canary: + # maxSurge: "25%" + # maxUnavailable: 0 + service: enabled: false # if true, you can use service type: ClusterIP # service type(ClusterIP, NodePort, LoadBalancer) From 08573725877bf31b949d1c2521b7e63abc47e2e6 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Fri, 6 Mar 2026 09:53:42 +0900 Subject: [PATCH 37/44] Remove CronJob example files CronJob template was deleted from http-service chart. 
Co-Authored-By: Claude Opus 4.6 --- http-service/examples/cronjob-advanced.yaml | 161 ------------------ .../examples/cronjob-extra-volumes.yaml | 26 --- http-service/examples/cronjob.yaml | 14 -- 3 files changed, 201 deletions(-) delete mode 100644 http-service/examples/cronjob-advanced.yaml delete mode 100644 http-service/examples/cronjob-extra-volumes.yaml delete mode 100644 http-service/examples/cronjob.yaml diff --git a/http-service/examples/cronjob-advanced.yaml b/http-service/examples/cronjob-advanced.yaml deleted file mode 100644 index 0399e3c0..00000000 --- a/http-service/examples/cronjob-advanced.yaml +++ /dev/null @@ -1,161 +0,0 @@ -fullnameOverride: "slime-advanced" - -cronJob: - enabled: true - -cronJobAnnotations: - release/cronjob-annotation: advanced - -cronJobLabels: - release/cronjob-label: advanced - -schedule: "* * * * *" -concurrencyPolicy: Replace -failedJobsHistoryLimit: 3 -startingDeadlineSeconds: 60 -successfulJobsHistoryLimit: 5 -timeZone: Asia/Tokyo - -cronJobTemplateAnnotations: - slime/cronjob-template-annotation: advanced - -cronJobTemplateLabels: - slime/cronjob-template-label: advanced - -activeDeadlineSeconds: 30 -backoffLimit: 10 - - -#completionMode: NonIndexed -#completions: 1 -#manualSelector: true -#parallelism: 1 -#jobTemplateSuspend: "false" -#ttlSecondsAfterFinished: 300 - -cronJobPodActiveDeadlineSeconds: 15 - -#cronJobAffinity: -# podAffinity: -# requiredDuringSchedulingIgnoredDuringExecution: -# - labelSelector: -# matchExpressions: -# - key: security -# operator: In -# values: -# - S1 - -#automountServiceAccountToken: true - -#cronJobDnsConfig: -# nameservers: -# - 127.0.0.1 -# searches: -# - ns1.svc.cluster.local - -#cronJobUseHostNetwork: true -#cronJobEnableServiceLinks: true -#cronJobHostAliases: -#- ip: "127.0.0.1" -# hostnames: -# - "foo.local" -# - "bar.local" -#cronJobHostIPC: true -#cronJobHostPID: true -#cronJobHostUsers: true -#cronJobHostname: test - -#cronJobImagePullSecrets: -# - name: slime-secret - 
-cronJobPodAnnotations: - slime/cronjob-pod-annotation: advanced - -cronJobPodLabels: - slime/cronjob-pod-label: advanced - -cronJobContainers: - - name: job - image: - repository: hello-world - tag: latest - imagePullPolicy: IfNotPresent - env: - - name: APP_NAME - value: 'slime-advanced' - envFrom: - - secretRef: - name: 'slime-advanced-env' - volumeMounts: - - name: configs - mountPath: /configs - -cronJobInitContainers: - enabled: true - containers: - - name: init - image: - repository: busybox - tag: latest - -cronJobVolumes: - - name: configs - configMap: - name: "slime-advanced-config" - defaultMode: 0644 - - -#cronJobNodeName: slime - -#cronJobPreemptionPolicy: Never - -#cronJobNodeSelector: -# disktype: ssd - -#cronJobPriority: 1 - -#cronJobPriorityClassName: system-node-critical - -#cronJobRestartPolicy: Never - -#cronJobSchedulerName: slime - -#cronJobSecurityContext: -# runAsUser: 1000 -# fsGroup: 2000 - -#cronJobSetHostnameAsFQDN: "false" - -#cronJobShareProcessNamespace: "false" - -#cronJobSubdomain: hostname.subdomain.pod-namespace.svc.cluster-domain - -cronJobTerminationGracePeriodSeconds: 120 - -#cronJobTolerations: -#- key: "key1" -# operator: "Equal" -# value: "value1" -# effect: "NoSchedule" - -#cronJobTopologySpreadConstraints: -# enabled: true -# constraints: -# - maxSkew: 1 -# topologyKey: topology.kubernetes.io/zone -# whenUnsatisfiable: DoNotSchedule -# labelSelector: -# app: myapp - -secrets: - env: - type: Opaque - data: - SECRET_VALUE: 'slime-advanced' - -configmaps: - config: - data: - greeting: "Hello slime." 
- properties: | - mode=default diff --git a/http-service/examples/cronjob-extra-volumes.yaml b/http-service/examples/cronjob-extra-volumes.yaml deleted file mode 100644 index 391afd27..00000000 --- a/http-service/examples/cronjob-extra-volumes.yaml +++ /dev/null @@ -1,26 +0,0 @@ -cronJob: - enabled: true - -schedule: "*/5 * * * *" -restartPolicy: OnFailure - -cronJobContainers: - - name: job - image: - repository: hello-world - tag: latest - imagePullPolicy: IfNotPresent - -cronJobRestartPolicy: Never - -cronJobVolumes: - - name: configs - configMap: - name: "slime-config" - defaultMode: 0644 - -extraCronJobVolumes: - - name: extra-configs - configMap: - name: "slime-extra-config" - defaultMode: 0644 diff --git a/http-service/examples/cronjob.yaml b/http-service/examples/cronjob.yaml deleted file mode 100644 index f7a0e8cf..00000000 --- a/http-service/examples/cronjob.yaml +++ /dev/null @@ -1,14 +0,0 @@ -cronJob: - enabled: true - -schedule: "*/5 * * * *" -restartPolicy: OnFailure - -cronJobContainers: - - name: job - image: - repository: hello-world - tag: latest - imagePullPolicy: IfNotPresent - -cronJobRestartPolicy: Never From c0f90739dec0953640b659f63dfc4f50ab1352f3 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Fri, 6 Mar 2026 16:22:32 +0900 Subject: [PATCH 38/44] Update example values for new chart structure Rewrite existing examples (deployment, hpa, ingress) to match new values structure (single container, gracefulShutdown, required probes, datadog tags, type-based autoscaling). Add new examples for KEDA autoscaling and Argo Rollouts. 
Co-Authored-By: Claude Opus 4.6 --- http-service/examples/deployment-hpa.yaml | 137 ++++++-------- http-service/examples/deployment-ingress.yaml | 101 ++++------ http-service/examples/deployment-keda.yaml | 61 ++++++ http-service/examples/deployment-rollout.yaml | 68 +++++++ http-service/examples/deployment.yaml | 173 +++++------------- 5 files changed, 260 insertions(+), 280 deletions(-) create mode 100644 http-service/examples/deployment-keda.yaml create mode 100644 http-service/examples/deployment-rollout.yaml diff --git a/http-service/examples/deployment-hpa.yaml b/http-service/examples/deployment-hpa.yaml index f20873ff..85abdb2c 100644 --- a/http-service/examples/deployment-hpa.yaml +++ b/http-service/examples/deployment-hpa.yaml @@ -1,95 +1,64 @@ -fullnameOverride: nginx-example-hpa -nameOverride: nginx-example-hpa - -deployment: +# Deployment with HPA autoscaling +image: + repository: nginx + tag: latest + +containerPort: 80 + +gracefulShutdown: + trafficDrainSeconds: 25 + appShutdownTimeoutSeconds: 30 + +startupProbe: + httpGet: + path: / + port: 80 + failureThreshold: 5 + initialDelaySeconds: 10 + periodSeconds: 1 + +livenessProbe: + httpGet: + path: / + port: 80 + failureThreshold: 2 + periodSeconds: 10 + +resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 100m + memory: 128Mi + +datadog: enabled: true + env: production + service: nginx-example-hpa + version: "1.0.0" -strategy: - type: RollingUpdate - - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - -annotations: - a.b: c - -labels: - a/b: c - -affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 10 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app.kubernetes.io/instance - operator: In - values: - - "{{ .Release.Name}}" - topologyKey: kubernetes.io/hostname - -containers: - - name: nginx - image: - repository: nginx - tag: latest - - lifecycle: - preStop: - exec: - command: ["sh", "-c", "sleep 10"] - - ports: - - name: http - 
containerPort: 80 - protocol: TCP - - resources: - requests: - cpu: 0.1 - memory: 256Mi - limits: - cpu: 0.1 - memory: 256Mi - - - name: ubuntu-sleep-infinity - image: - repository: ubuntu - tag: latest - - command: ["sleep"] - args: ["infinity"] +autoscaling: + type: hpa + minReplicas: 2 + maxReplicas: 10 + hpa: + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 + behavior: + scaleDown: + stabilizationWindowSeconds: 300 service: enabled: true type: ClusterIP - clusterIP: None ports: http: targetPort: 80 port: 80 protocol: TCP - -autoscaling: - enabled: true - maxReplicas: 2 - minReplicas: 2 - metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 50 - behavior: - scaleDown: - stabilizationWindowSeconds: 300 - -podDisruptionBudget: - enabled: true - maxUnavailable: 1 - -test: - enabled: false diff --git a/http-service/examples/deployment-ingress.yaml b/http-service/examples/deployment-ingress.yaml index 444c3eee..55976ad9 100644 --- a/http-service/examples/deployment-ingress.yaml +++ b/http-service/examples/deployment-ingress.yaml @@ -1,36 +1,44 @@ -fullnameOverride: nginx-example-ingress -nameOverride: nginx-example-ingress +# Deployment with Ingress +image: + repository: nginx + tag: latest -deployment: - enabled: true +containerPort: 80 -strategy: - type: RollingUpdate +replicas: 2 - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 +gracefulShutdown: + trafficDrainSeconds: 25 + appShutdownTimeoutSeconds: 30 -replicas: 2 +startupProbe: + httpGet: + path: / + port: 80 + failureThreshold: 5 + initialDelaySeconds: 10 + periodSeconds: 1 -containers: - - name: nginx - image: - repository: nginx - tag: latest +livenessProbe: + httpGet: + path: / + port: 80 + failureThreshold: 2 + periodSeconds: 10 - ports: - - name: http - containerPort: 80 - protocol: TCP +resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 100m + memory: 128Mi - resources: - 
requests: - cpu: 0.1 - memory: 128Mi - limits: - cpu: 0.1 - memory: 128Mi +datadog: + enabled: true + env: production + service: nginx-example-ingress + version: "1.0.0" service: enabled: true @@ -44,51 +52,14 @@ service: ingress: enabled: true ingresses: - example1: + main: ingressClassName: alb annotations: - alb.ingress.kubernetes.io/backend-protocol: HTTP - alb.ingress.kubernetes.io/healthcheck-path: / - alb.ingress.kubernetes.io/healthcheck-port: '80' - alb.ingress.kubernetes.io/healthcheck-protocol: HTTP - alb.ingress.kubernetes.io/inbound-cidrs: '0.0.0.0/0' - alb.ingress.kubernetes.io/listen-ports: |- - [ - { - "HTTP": 80 - } - ] alb.ingress.kubernetes.io/scheme: internal alb.ingress.kubernetes.io/target-type: ip hosts: - host: example.com paths: - - path: "/path1/*" + - path: "/*" pathType: ImplementationSpecific portNumber: 80 - - example2: - ingressClassName: alb - annotations: - alb.ingress.kubernetes.io/backend-protocol: HTTP - alb.ingress.kubernetes.io/healthcheck-path: / - alb.ingress.kubernetes.io/healthcheck-port: '80' - alb.ingress.kubernetes.io/healthcheck-protocol: HTTP - alb.ingress.kubernetes.io/inbound-cidrs: '0.0.0.0/0' - alb.ingress.kubernetes.io/listen-ports: |- - [ - { - "HTTP": 80 - } - ] - alb.ingress.kubernetes.io/scheme: internal - alb.ingress.kubernetes.io/target-type: ip - hosts: - - host: example.com - paths: - - path: "/path2/*" - pathType: ImplementationSpecific - portNumber: 80 - -test: - enabled: false diff --git a/http-service/examples/deployment-keda.yaml b/http-service/examples/deployment-keda.yaml new file mode 100644 index 00000000..0ced1aa9 --- /dev/null +++ b/http-service/examples/deployment-keda.yaml @@ -0,0 +1,61 @@ +# Deployment with KEDA autoscaling +image: + repository: nginx + tag: latest + +containerPort: 80 + +gracefulShutdown: + trafficDrainSeconds: 25 + appShutdownTimeoutSeconds: 30 + +startupProbe: + httpGet: + path: / + port: 80 + failureThreshold: 5 + initialDelaySeconds: 10 + periodSeconds: 1 + 
+livenessProbe: + httpGet: + path: / + port: 80 + failureThreshold: 2 + periodSeconds: 10 + +resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 100m + memory: 128Mi + +datadog: + enabled: true + env: production + service: nginx-example-keda + version: "1.0.0" + +autoscaling: + type: keda + minReplicas: 2 + maxReplicas: 10 + keda: + pollingInterval: 30 + cooldownPeriod: 300 + triggers: + - type: cpu + metadata: + type: Utilization + value: "50" + +service: + enabled: true + type: ClusterIP + ports: + http: + targetPort: 80 + port: 80 + protocol: TCP diff --git a/http-service/examples/deployment-rollout.yaml b/http-service/examples/deployment-rollout.yaml new file mode 100644 index 00000000..8e4d8077 --- /dev/null +++ b/http-service/examples/deployment-rollout.yaml @@ -0,0 +1,68 @@ +# Deployment with Argo Rollouts canary strategy +image: + repository: nginx + tag: latest + +containerPort: 80 + +gracefulShutdown: + trafficDrainSeconds: 25 + appShutdownTimeoutSeconds: 30 + +startupProbe: + httpGet: + path: / + port: 80 + failureThreshold: 5 + initialDelaySeconds: 10 + periodSeconds: 1 + +livenessProbe: + httpGet: + path: / + port: 80 + failureThreshold: 2 + periodSeconds: 10 + +resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 100m + memory: 128Mi + +datadog: + enabled: true + env: production + service: nginx-example-rollout + version: "1.0.0" + +autoscaling: + type: hpa + minReplicas: 2 + maxReplicas: 10 + hpa: + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50 + +rollout: + enabled: true + strategy: + canary: + maxSurge: "25%" + maxUnavailable: 0 + +service: + enabled: true + type: ClusterIP + ports: + http: + targetPort: 80 + port: 80 + protocol: TCP diff --git a/http-service/examples/deployment.yaml b/http-service/examples/deployment.yaml index 403e1f6d..442b0cdc 100644 --- a/http-service/examples/deployment.yaml +++ b/http-service/examples/deployment.yaml @@ -1,128 +1,44 
@@ -#fullnameOverride: nginx-example -#nameOverride: nginx-example +# Basic deployment with fixed replicas +image: + repository: nginx + tag: latest -deployment: - enabled: true - -strategy: - type: RollingUpdate - - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 +containerPort: 80 replicas: 2 -annotations: - a.b: c - -labels: - a/b: c - -affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 10 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: app.kubernetes.io/instance - operator: In - values: - - nginx-example - topologyKey: kubernetes.io/hostname - -configmap: - name1: log-config1 - -secret: - name1: aws-credentials1 - -volumes: - - name: config-vol - configMap: - name: '{{ include "slime.fullname" . }}-{{ .Values.configmap.name1 }}' - items: - - key: log_level - path: log_level - -configmaps: - "{{ .Values.configmap.name1 }}": - labels: - hoge: "1" - annotations: - foo: a - data: - log_level: "ERROR" - - log-config2: - labels: - hoge: "2" - data: - log_level: "ERROR" - -secrets: - "{{ .Values.secret.name1 }}": - labels: - hoge: "1" - annotations: - foo: a - - data: - AWS_ACCESS_KEY: "YWZiY2RlYTEyMzQzCg==" - - aws-credentials2: - annotations: - foo: a - data: - AWS_ACCESS_KEY: "YWZiY2RlYTEyMzQzCg==" - -containers: - - name: nginx - image: - repository: nginx - tag: latest - - volumeMounts: - - name: config-vol - mountPath: /etc/config - - lifecycle: - preStop: - exec: - command: ["sh", "-c", "sleep 10"] - - ports: - - name: http - containerPort: 80 - protocol: TCP - - resources: - requests: - cpu: 0.1 - memory: 128Mi - limits: - cpu: 0.1 - memory: 128Mi - - - name: ubuntu-sleep-infinity - image: - repository: ubuntu - tag: latest - - command: ["sh", "-c", "sleep infinity"] - - envFrom: - - secretRef: - name: '{{ include "slime.fullname" . 
}}-{{ .Values.secret.name1 }}' - - resources: - requests: - cpu: 0.1 - memory: 64Mi - limits: - cpu: 0.1 - memory: 64Mi +gracefulShutdown: + trafficDrainSeconds: 25 + appShutdownTimeoutSeconds: 30 + +startupProbe: + httpGet: + path: / + port: 80 + failureThreshold: 5 + initialDelaySeconds: 10 + periodSeconds: 1 + +livenessProbe: + httpGet: + path: / + port: 80 + failureThreshold: 2 + periodSeconds: 10 + +resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 100m + memory: 128Mi + +datadog: + enabled: true + env: production + service: nginx-example + version: "1.0.0" service: enabled: true @@ -133,16 +49,11 @@ service: port: 80 protocol: TCP -autoscaling: - enabled: false - test: enabled: true containers: - - name: test - - image: - repository: curlimages/curl - tag: latest - - command: ["sh","-c","sleep 30; echo $APP_FULLNAME.$NAMESPACE; curl $APP_FULLNAME.$NAMESPACE"] + - name: test + image: + repository: curlimages/curl + tag: latest + command: ["sh", "-c", "curl -sf http://$APP_FULLNAME.$NAMESPACE"] From 62c75c25b92682a8af2ac751e10be7504b39ae53 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Fri, 6 Mar 2026 16:25:36 +0900 Subject: [PATCH 39/44] Update Makefile for new example patterns Remove CronJob lint/test targets. Add lint targets for KEDA and Rollout examples. Test target only covers the basic deployment example (helm test requires a running cluster). Co-Authored-By: Claude Opus 4.6 --- http-service/Makefile | 60 +++++++++++++++---------------------------- 1 file changed, 21 insertions(+), 39 deletions(-) diff --git a/http-service/Makefile b/http-service/Makefile index b169986f..e2017320 100644 --- a/http-service/Makefile +++ b/http-service/Makefile @@ -6,7 +6,7 @@ install: helm upgrade -i -f examples/deployment.yaml --wait $(RELEASE) . 
.PHONY: lint -lint: lint-deployment lint-deployment-ingress lint-deployment-hpa lint-cronjob lint-cronjob-advanced +lint: lint-deployment lint-deployment-hpa lint-deployment-ingress lint-deployment-keda lint-deployment-rollout .PHONY: lint-deployment lint-deployment: @@ -15,13 +15,6 @@ lint-deployment: @echo "=> Validating examples/deployment.yaml" helm template -f examples/deployment.yaml . | kubeval --strict --ignore-missing-schemas --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/ --kubernetes-version $(KUBERNETES_VERSION) --exit-on-error -.PHONY: lint-deployment-ingress -lint-deployment-ingress: - @echo "=> Linting examples/deployment-ingress.yaml" - helm lint --strict -f examples/deployment-ingress.yaml - @echo "=> Validating examples/deployment-ingress.yaml" - helm template -f examples/deployment-ingress.yaml . | kubeval --strict --ignore-missing-schemas --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/ --kubernetes-version $(KUBERNETES_VERSION) --exit-on-error - .PHONY: lint-deployment-hpa lint-deployment-hpa: @echo "=> Linting examples/deployment-hpa.yaml" @@ -29,22 +22,29 @@ lint-deployment-hpa: @echo "=> Validating examples/deployment-hpa.yaml" helm template -f examples/deployment-hpa.yaml . | kubeval --strict --ignore-missing-schemas --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/ --kubernetes-version $(KUBERNETES_VERSION) --exit-on-error -.PHONY: lint-cronjob -lint-cronjob: - @echo "=> Linting examples/cronjob.yaml" - helm lint --strict -f examples/cronjob.yaml - @echo "=> Validating examples/cronjob.yaml" - helm template -f examples/cronjob.yaml . 
| kubeval --strict --ignore-missing-schemas --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/ --kubernetes-version $(KUBERNETES_VERSION) --exit-on-error +.PHONY: lint-deployment-ingress +lint-deployment-ingress: + @echo "=> Linting examples/deployment-ingress.yaml" + helm lint --strict -f examples/deployment-ingress.yaml + @echo "=> Validating examples/deployment-ingress.yaml" + helm template -f examples/deployment-ingress.yaml . | kubeval --strict --ignore-missing-schemas --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/ --kubernetes-version $(KUBERNETES_VERSION) --exit-on-error + +.PHONY: lint-deployment-keda +lint-deployment-keda: + @echo "=> Linting examples/deployment-keda.yaml" + helm lint --strict -f examples/deployment-keda.yaml + @echo "=> Validating examples/deployment-keda.yaml" + helm template -f examples/deployment-keda.yaml . | kubeval --strict --ignore-missing-schemas --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/ --kubernetes-version $(KUBERNETES_VERSION) --exit-on-error -.PHONY: lint-cronjob-advanced -lint-cronjob-advanced: - @echo "=> Linting examples/cronjob-advanced.yaml" - helm lint --strict -f examples/cronjob-advanced.yaml - @echo "=> Validating examples/cronjob-advanced.yaml" - helm template -f examples/cronjob-advanced.yaml . | kubeval --strict --ignore-missing-schemas --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/ --kubernetes-version $(KUBERNETES_VERSION) --exit-on-error +.PHONY: lint-deployment-rollout +lint-deployment-rollout: + @echo "=> Linting examples/deployment-rollout.yaml" + helm lint --strict -f examples/deployment-rollout.yaml + @echo "=> Validating examples/deployment-rollout.yaml" + helm template -f examples/deployment-rollout.yaml . 
| kubeval --strict --ignore-missing-schemas --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master/ --kubernetes-version $(KUBERNETES_VERSION) --exit-on-error .PHONY: test -test: test-deployment test-cronjob test-cronjob-advanced +test: test-deployment .PHONY: test-deployment test-deployment: @@ -55,24 +55,6 @@ test-deployment: helm test $(RELEASE)-deployment --logs helm uninstall $(RELEASE)-deployment -.PHONY: test-cronjob -test-cronjob: - @if helm ls | grep -v "NAME" | cut -f1 | grep -e "^$(RELEASE)$$" > /dev/null; then helm uninstall $(RELEASE); fi - @echo "=> Testing examples/cronjob.yaml" - helm upgrade -i --wait -f examples/cronjob.yaml $(RELEASE)-cronjob . - sleep 30 - helm test $(RELEASE)-cronjob --logs - helm uninstall $(RELEASE)-cronjob - -.PHONY: test-cronjob-advanced -test-cronjob-advanced: - @if helm ls | grep -v "NAME" | cut -f1 | grep -e "^$(RELEASE)$$" > /dev/null; then helm uninstall $(RELEASE); fi - @echo "=> Testing examples/cronjob-advanced.yaml" - helm upgrade -i --wait -f examples/cronjob-advanced.yaml $(RELEASE)-cronjob-advanced . - sleep 30 - helm test $(RELEASE)-cronjob-advanced --logs - helm uninstall $(RELEASE)-cronjob-advanced - .PHONY: uninstall uninstall: @if helm ls | grep -v "NAME" | cut -f1 | grep -e "^$(RELEASE)$$" > /dev/null; then helm uninstall $(RELEASE); fi From e705b3f8f836c6d2b651d52a9dea254d724e19af Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Fri, 6 Mar 2026 16:34:47 +0900 Subject: [PATCH 40/44] Add README for http-service chart Document opinionated defaults, required values, autoscaling types, Argo Rollouts integration, graceful shutdown design, and examples. 
Co-Authored-By: Claude Opus 4.6 --- http-service/README.md | 198 +++++++++++++++++------------------------ 1 file changed, 82 insertions(+), 116 deletions(-) diff --git a/http-service/README.md b/http-service/README.md index 417bec8a..094c2647 100644 --- a/http-service/README.md +++ b/http-service/README.md @@ -1,116 +1,82 @@ -# slime - -₍Ꙭ̂₎ < Not my bad slime - -₍Ꙭ̂₎ < I will transform into anything - -## TL;DR; - -``` -$ helm install chatwork/slime -``` - -## Prerequisites - -* Kubernetes 1.18+ - -## Installing the Chart - - -To install the chart with the release name `my-release`: - -``` -$ helm install --name my-release chatwork/slime -``` - -The command deploys the slime chart on the Kubernetes cluster in the default configuration. The [configuration](https://github.com/chatwork/charts/tree/master/slime#configuration) section lists the parameters that can be configured during installation. - -## Uninstalling the Chart - -To uninstall/delete the `my-release` deployment: - -``` -$ helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Difference from raw chart - -[raw chart](https://github.com/chatwork/charts/tree/master/raw) is is very useful, but it is too flexible and can be difficult to write if you are not used to helm. -Therefore, this chart has some format for writing. If it is a simple `deployment` + `service` + `ingress`, this chart is surely easier to write than `raw chart`, -but it does not allow you to define any resources(CRDS, cert-manager,...) freely. - -Use each charts as needed or use them together. - -## Configuration - -The following table lists the configurable parameters of the slime chart and their default values. 
- -| Parameter | Description | Default | -|--------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------|---------------| -| `nameOverride` | Override name of app | `null` | -| `fullnameOverride` | Override the full qualified app name | `null` | -| `deployment.enabled` | Enable deployment | `false` | -| `strategy` | rolling update strategy for deployment | `{}` | -| `annotations` | annotations for deployment | `{}` | -| `labels` | labels for deployment | `{}` | -| `replicas` | replicas for deployment | `1` | -| `revisionHistoryLimit` | revisionHistoryLimit | `""` | -| `podAnnotations` | pod annotations | `{}` | -| `podLabels` | pod labels | `{}` | -| `podSecurityContext` | pod securityContext | `{}` | -| `affinity` | affinity | `{}` | -| `nodeSelector` | nodeSelector | `{}` | -| `imagePullSecrets` | imagePullSecrets | `[]` | -| `readinessGates` | readinessGates | `[]` | -| `priorityClassName` | priorityClassName | `""` | -| `progressDeadlineSeconds` | progressDeadlineSeconds | `""` | -| `volumes` | pod volumes(initContainers, containers) | `[]` | -| `containers` | application containers | `[]` | -| `initContainers.enabled` | if true, you can use initContainers | `false` | -| `initContainers.containers` | initContainers config | `[]` | -| `configmaps` | transform ConfigMap manifest. You can set `binaryData`, `data` | `{}` | -| `secrets` | transform Secret's manifest. 
You can set `data`, `stringData` and `type` | `{}` | -| `autoscaling.enabled` | if true, you can use hpa | `false` | -| `autoscaling.behavior` | autscaling behavior https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#configurable-scaling-behavior | `{}` | -| `autoscaling.metrics` | autoscaling metrics | `[]` | -| `autoscaling.maxReplicas` | autoscaling maxReplicas | `2` | -| `autoscaling.minReplicas` | autoscaling minReplicas | `1` | -| `service.enabled` | if true, you can use service | `false` | -| `service.type` | service type(ClusterIP, NodePort, LoadBalancer) | `"ClusterIP"` | -| `service.ports` | service ports | `{}` | -| `clusterRole.enabled` | if true, you can use clusterRole | `false` | -| `clusterRole.rules` | clusterRole rules | `[]` | -| `role.enabled` | if true, you can use role | `false` | -| `role.rules` | role rules | `[]` | -| `serviceAccount.create` | if true, you can create serviceAccount | `false` | -| `serviceAccount.name` | if you create serviceAccount, you can set name | `null` | -| `serviceAccount.labels` | service account labels | `{}` | -| `serviceAccount.annotations` | serviceAccount annotations | `{}` | -| `podDisruptionBudget.enabled` | if ture, you can use podDisruptionBudget | `false` | -| `podDisruptionBudget.annotations` | podDisruptionBudget annotations | `{}` | -| `podDisruptionBudget.labels` | podDisruptionBudget labels | `{}` | -| `podDisruptionBudget.maxUnavailable` | podDisruptionBudget maxUnavailable | `null` | -| `podDisruptionBudget.minAvailable` | podDisruptionBudget minAvailable | `null` | -| `ingress.enabled` | if true, you can use ingress | `false` | -| `ingress.ingresses` | ingresses config | `{}` | -| `cronJob.enabled` | if true, you can use CronJob | `false` | -| `schedule` | CronJob's schedule | `""` | -| `cronJobRestartPolicy` | CronJob's restartPolicy | `OnFailure` | -| `concurrencyPolicy` | CronJob's concurrencyPolicy | `Allow` | -| `failedJobsHistoryLimit` | CronJob's failedJobsHistoryLimit 
| `1` | -| `startingDeadlineSeconds` | CronJob's startingDeadlineSeconds | `null` | -| `successfulJobsHistoryLimit` | CronJob's successfulJobsHistoryLimit | `3` | -| `suspend` | CronJob's suspend | `null` | -| `timeZone` | CronJob's timeZone https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#cronjob-v1-batch | `null` | -| `cronJobContainers` | CronJob's containers https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#jobtemplatespec-v1-batch | `[]` | -| `cronJobVolumes` | CronJob's pod volumes(initContainers, containers) | `[]` | -| `extraCronJobVolumes` | CronJob's extra pod volumes(initContainers, containers). Use when you want to add volume other than the common settings for each application. | `[]` | -| `test.enabled` | if true, you can use helm test | `false` | -| `test.containers` | helm test container config | `[]` | - - -# generate README - -This README is generated with https://github.com/rapidsai/frigate +# http-service + +An opinionated Helm chart for language-agnostic HTTP services with sensible defaults. 
+ +## Install + +\`\`\`sh +helm repo add chatwork https://chatwork.github.io/charts +helm install my-service chatwork/http-service -f values.yaml +\`\`\` + +## Opinionated defaults + +| Concern | Default | Rationale | +|---------|---------|-----------| +| Strategy | \`RollingUpdate\` (maxSurge 25%, maxUnavailable 1) | Zero-downtime deploys | +| PDB | \`enabled: true\`, \`maxUnavailable: 1\` | Protect availability during node drain | +| Reloader | \`enabled: true\` | Auto-restart on ConfigMap/Secret changes | +| Datadog tags | \`enabled: true\` | Unified service tags (\`env\`, \`service\`, \`version\`) on Deployment and Pod | +| Graceful shutdown | Required (\`trafficDrainSeconds\` + \`appShutdownTimeoutSeconds\`) | Prevent traffic loss during termination | +| Probes | \`startupProbe\` and \`livenessProbe\` required, \`readinessProbe\` optional | Enforce health check discipline | + +## Required values + +The following values must be provided — the template will fail with an explicit error if they are missing: + +- \`image.repository\` / \`image.tag\` +- \`gracefulShutdown.trafficDrainSeconds\` — seconds to sleep in preStop, waiting for LB deregistration +- \`gracefulShutdown.appShutdownTimeoutSeconds\` — seconds the app gets after SIGTERM +- \`startupProbe\` +- \`livenessProbe\` +- \`datadog.env\` / \`datadog.service\` / \`datadog.version\` (when \`datadog.enabled: true\`) + +## Autoscaling + +Controlled by \`autoscaling.type\`: + +| Type | Description | +|------|-------------| +| \`none\` | Fixed replicas (set \`replicas\` value) | +| \`hpa\` | Kubernetes-native HorizontalPodAutoscaler | +| \`keda\` | KEDA ScaledObject (manages HPA internally) | + +When \`autoscaling.type\` is \`hpa\` or \`keda\`, setting \`replicas\` will cause a template error to prevent conflicts. + +## Argo Rollouts + +Set \`rollout.enabled: true\` to create a Rollout resource that references the Deployment via \`workloadRef\`. 
The HPA/KEDA \`scaleTargetRef\` automatically switches to target the Rollout. + +\`rollout.strategy\` is required when enabled. + +## Graceful shutdown + +The chart auto-generates \`terminationGracePeriodSeconds\` and a preStop hook: + +\`\`\` +|-- terminationGracePeriodSeconds (drain + app) --| +|-- preStop sleep (drain) --|-- app shutdown ------| + ^ + SIGTERM +\`\`\` + +The preStop sleep holds the container alive while the load balancer finishes deregistering the Pod. + +## Examples + +See [\`examples/\`](examples/) for values files covering common patterns: + +| File | Pattern | +|------|---------| +| \`deployment.yaml\` | Basic deployment with fixed replicas | +| \`deployment-hpa.yaml\` | HPA autoscaling | +| \`deployment-ingress.yaml\` | Ingress with ALB | +| \`deployment-keda.yaml\` | KEDA autoscaling | +| \`deployment-rollout.yaml\` | Argo Rollouts canary | + +## Lint & test + +\`\`\`sh +make lint # lint all examples +make test # deploy + helm test (requires cluster) +\`\`\` From ba41726e847c60dd899f9acebb7fd3e98f33da5c Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Fri, 6 Mar 2026 17:12:14 +0900 Subject: [PATCH 41/44] Rename podDisruptionBudget to pod.disruptionBudget Align with existing pod.* namespace convention (pod.annotations, pod.labels, pod.securityContext). 
Co-Authored-By: Claude Opus 4.6 --- http-service/README.md | 2 +- http-service/templates/pdb.yaml | 10 +++++----- http-service/values.yaml | 13 ++++++------- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/http-service/README.md b/http-service/README.md index 094c2647..d1e38b5f 100644 --- a/http-service/README.md +++ b/http-service/README.md @@ -14,7 +14,7 @@ helm install my-service chatwork/http-service -f values.yaml | Concern | Default | Rationale | |---------|---------|-----------| | Strategy | \`RollingUpdate\` (maxSurge 25%, maxUnavailable 1) | Zero-downtime deploys | -| PDB | \`enabled: true\`, \`maxUnavailable: 1\` | Protect availability during node drain | +| PDB | \`pod.disruptionBudget.enabled: true\`, \`maxUnavailable: 1\` | Protect availability during node drain | | Reloader | \`enabled: true\` | Auto-restart on ConfigMap/Secret changes | | Datadog tags | \`enabled: true\` | Unified service tags (\`env\`, \`service\`, \`version\`) on Deployment and Pod | | Graceful shutdown | Required (\`trafficDrainSeconds\` + \`appShutdownTimeoutSeconds\`) | Prevent traffic loss during termination | diff --git a/http-service/templates/pdb.yaml b/http-service/templates/pdb.yaml index 153c90ea..aa359a07 100644 --- a/http-service/templates/pdb.yaml +++ b/http-service/templates/pdb.yaml @@ -1,23 +1,23 @@ -{{- if .Values.podDisruptionBudget.enabled }} +{{- if .Values.pod.disruptionBudget.enabled }} apiVersion: policy/v1 kind: PodDisruptionBudget metadata: - {{- with .Values.podDisruptionBudget.annotations }} + {{- with .Values.pod.disruptionBudget.annotations }} annotations: {{- toYaml . | nindent 4}} {{- end }} labels: {{- include "http-service.labels" . | nindent 4 }} - {{- with .Values.podDisruptionBudget.labels }} + {{- with .Values.pod.disruptionBudget.labels }} {{- toYaml . | nindent 4 }} {{- end }} name: {{ template "http-service.fullname" . 
}} namespace: {{ .Release.Namespace }} spec: - {{- with .Values.podDisruptionBudget.maxUnavailable }} + {{- with .Values.pod.disruptionBudget.maxUnavailable }} maxUnavailable: {{ . }} {{- end }} - {{- with .Values.podDisruptionBudget.minAvailable }} + {{- with .Values.pod.disruptionBudget.minAvailable }} minAvailable: {{ . }} {{- end }} selector: diff --git a/http-service/values.yaml b/http-service/values.yaml index f54acf1c..696c0fd4 100644 --- a/http-service/values.yaml +++ b/http-service/values.yaml @@ -40,6 +40,12 @@ pod: annotations: {} labels: {} securityContext: {} + disruptionBudget: + enabled: true # PDB enabled by default to protect availability + annotations: {} # PDB annotations + labels: {} # PDB labels + maxUnavailable: 1 # allow at most 1 pod unavailable during disruptions + minAvailable: # PDB minAvailable affinity: {} # affinity (e.g. node affinity for ARM, set via Helmfile settings) @@ -229,13 +235,6 @@ serviceAccount: labels: {} # service account labels annotations: {} # serviceAccount annotations -podDisruptionBudget: - enabled: true # PDB enabled by default to protect availability - annotations: {} # podDisruptionBudget annotations - labels: {} # podDisruptionBudget labels - maxUnavailable: 1 # allow at most 1 pod unavailable during disruptions - minAvailable: # podDisruptionBudget minAvailable - ingress: enabled: false # if true, you can use ingress ingresses: {} # name -> spec map, create as many Ingress resources as needed From 4c56da5ed07fa742c68238a5d12fc934aed105ff Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Fri, 6 Mar 2026 17:53:17 +0900 Subject: [PATCH 42/44] Move autoscaling.type validation from hpa.yaml to deployment.yaml The type enum check is a general concern, not HPA-specific. deployment.yaml is always rendered, making it the right place for global validations alongside the existing replicas guard. 
Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 3 +++ http-service/templates/hpa.yaml | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index 6b5549ca..3783c812 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -22,6 +22,9 @@ metadata: {{- toYaml . | nindent 4 }} {{- end }} spec: + {{- if not (has .Values.autoscaling.type (list "none" "hpa" "keda")) }} + {{- fail "autoscaling.type must be one of: none, hpa, keda" }} + {{- end }} {{- if and (ne .Values.autoscaling.type "none") .Values.replicas }} {{- fail "replicas should not be set when autoscaling.type is hpa or keda — the autoscaler manages replica count" }} {{- end }} diff --git a/http-service/templates/hpa.yaml b/http-service/templates/hpa.yaml index 725f20f3..8a918f8e 100644 --- a/http-service/templates/hpa.yaml +++ b/http-service/templates/hpa.yaml @@ -1,6 +1,3 @@ -{{- if not (has .Values.autoscaling.type (list "none" "hpa" "keda")) }} -{{- fail "autoscaling.type must be one of: none, hpa, keda" }} -{{- end }} {{- if eq .Values.autoscaling.type "hpa" }} apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler From 3084735d64e317bd9cba18c7a1c903b39dcf5551 Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Fri, 6 Mar 2026 18:06:18 +0900 Subject: [PATCH 43/44] Remove remaining tpl calls and fix misleading required message - Replace tpl with plain variable/toYaml in configmap, secret, and test templates to eliminate all tpl usage from the chart. - Fix hpa.yaml required message: behavior is optional (wrapped in with), so the message should describe the hpa map, not behavior. 
Co-Authored-By: Claude Opus 4.6 --- http-service/templates/configmap.yaml | 2 +- http-service/templates/hpa.yaml | 2 +- http-service/templates/secret.yaml | 2 +- http-service/templates/tests/test.yaml | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/http-service/templates/configmap.yaml b/http-service/templates/configmap.yaml index 330ff3de..27ba34da 100644 --- a/http-service/templates/configmap.yaml +++ b/http-service/templates/configmap.yaml @@ -4,7 +4,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ include "http-service.fullname" $root }}-{{ tpl $name $root }} + name: {{ include "http-service.fullname" $root }}-{{ $name }} namespace: {{ $root.Release.Namespace }} labels: {{- include "http-service.labels" $root | nindent 4 }} diff --git a/http-service/templates/hpa.yaml b/http-service/templates/hpa.yaml index 8a918f8e..02bd052c 100644 --- a/http-service/templates/hpa.yaml +++ b/http-service/templates/hpa.yaml @@ -7,7 +7,7 @@ metadata: labels: {{- include "http-service.labels" . | nindent 4 }} spec: - {{- with (required "autoscaling.hpa.behavior is required when type is hpa" .Values.autoscaling.hpa).behavior }} + {{- with (required "autoscaling.hpa is required when type is hpa" .Values.autoscaling.hpa).behavior }} behavior: {{- toYaml . 
| nindent 4 }} {{- end }} diff --git a/http-service/templates/secret.yaml b/http-service/templates/secret.yaml index 44429105..6f6616b3 100644 --- a/http-service/templates/secret.yaml +++ b/http-service/templates/secret.yaml @@ -4,7 +4,7 @@ apiVersion: v1 kind: Secret metadata: - name: {{ include "http-service.fullname" $root }}-{{ tpl $name $root }} + name: {{ include "http-service.fullname" $root }}-{{ $name }} namespace: {{ $root.Release.Namespace }} labels: {{- include "http-service.labels" $root | nindent 4 }} diff --git a/http-service/templates/tests/test.yaml b/http-service/templates/tests/test.yaml index 72406369..f974932a 100644 --- a/http-service/templates/tests/test.yaml +++ b/http-service/templates/tests/test.yaml @@ -34,11 +34,11 @@ spec: {{- end }} {{- with $container.envFrom }} envFrom: - {{- tpl (. | toYaml) $root | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- with $container.resources }} resources: - {{- tpl (. | toYaml) $root | nindent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} {{- end }} restartPolicy: Never From 0a26d43d644b64ffcebec163f2d52ebc20ced49c Mon Sep 17 00:00:00 2001 From: Tasuku Yamashita Date: Fri, 6 Mar 2026 18:11:08 +0900 Subject: [PATCH 44/44] Extract required checks into variables for readability - Datadog: validate env/service/version once at the top of deployment.yaml, use plain .Values references downstream - Probes: extract startupProbe/livenessProbe into variables - ScaledObject: extract $keda and $triggers variables, access pollingInterval/cooldownPeriod through $keda - Rollout: extract $strategy variable - HPA: extract $hpa and $metrics variables (the previous commit only fixed the required message) Eliminates redundant required calls and inline required+toYaml nesting throughout all templates.
Co-Authored-By: Claude Opus 4.6 --- http-service/templates/deployment.yaml | 23 +++++++++++++++-------- http-service/templates/hpa.yaml | 6 ++++-- http-service/templates/rollout.yaml | 3 ++- http-service/templates/scaled-object.yaml | 8 +++++--- 4 files changed, 26 insertions(+), 14 deletions(-) diff --git a/http-service/templates/deployment.yaml b/http-service/templates/deployment.yaml index 3783c812..37ad9d12 100644 --- a/http-service/templates/deployment.yaml +++ b/http-service/templates/deployment.yaml @@ -1,4 +1,9 @@ {{- $root := . }} +{{- if .Values.datadog.enabled }} +{{- $_ := required "datadog.env is required when datadog.enabled is true" .Values.datadog.env }} +{{- $_ := required "datadog.service is required when datadog.enabled is true" .Values.datadog.service }} +{{- $_ := required "datadog.version is required when datadog.enabled is true" .Values.datadog.version }} +{{- end }} apiVersion: apps/v1 kind: Deployment metadata: @@ -14,9 +19,9 @@ metadata: labels: {{- include "http-service.labels" . | nindent 4 }} {{- if .Values.datadog.enabled }} - tags.datadoghq.com/env: {{ required "datadog.env is required when datadog.enabled is true" .Values.datadog.env }} - tags.datadoghq.com/service: {{ required "datadog.service is required when datadog.enabled is true" .Values.datadog.service }} - tags.datadoghq.com/version: {{ required "datadog.version is required when datadog.enabled is true" .Values.datadog.version | quote }} + tags.datadoghq.com/env: {{ .Values.datadog.env }} + tags.datadoghq.com/service: {{ .Values.datadog.service }} + tags.datadoghq.com/version: {{ .Values.datadog.version | quote }} {{- end }} {{- with .Values.deployment.labels }} {{- toYaml . | nindent 4 }} @@ -50,9 +55,9 @@ spec: labels: {{- include "http-service.selectorLabels" . 
| nindent 8 }} {{- if .Values.datadog.enabled }} - tags.datadoghq.com/env: {{ required "datadog.env is required when datadog.enabled is true" .Values.datadog.env }} - tags.datadoghq.com/service: {{ required "datadog.service is required when datadog.enabled is true" .Values.datadog.service }} - tags.datadoghq.com/version: {{ required "datadog.version is required when datadog.enabled is true" .Values.datadog.version | quote }} + tags.datadoghq.com/env: {{ .Values.datadog.env }} + tags.datadoghq.com/service: {{ .Values.datadog.service }} + tags.datadoghq.com/version: {{ .Values.datadog.version | quote }} {{- end }} {{- with .Values.pod.labels }} {{- toYaml . | nindent 8 }} @@ -169,10 +174,12 @@ spec: command: - sleep - {{ $drainSeconds | quote }} + {{- $startupProbe := required "startupProbe is required" .Values.startupProbe }} + {{- $livenessProbe := required "livenessProbe is required" .Values.livenessProbe }} startupProbe: - {{- toYaml (required "startupProbe is required" .Values.startupProbe) | nindent 12 }} + {{- toYaml $startupProbe | nindent 12 }} livenessProbe: - {{- toYaml (required "livenessProbe is required" .Values.livenessProbe) | nindent 12 }} + {{- toYaml $livenessProbe | nindent 12 }} {{- with .Values.readinessProbe }} readinessProbe: {{- toYaml . | nindent 12 }} diff --git a/http-service/templates/hpa.yaml b/http-service/templates/hpa.yaml index 02bd052c..cb8064d5 100644 --- a/http-service/templates/hpa.yaml +++ b/http-service/templates/hpa.yaml @@ -7,12 +7,14 @@ metadata: labels: {{- include "http-service.labels" . | nindent 4 }} spec: - {{- with (required "autoscaling.hpa is required when type is hpa" .Values.autoscaling.hpa).behavior }} + {{- $hpa := required "autoscaling.hpa is required when type is hpa" .Values.autoscaling.hpa }} + {{- $metrics := required "autoscaling.hpa.metrics is required when type is hpa" $hpa.metrics }} + {{- with $hpa.behavior }} behavior: {{- toYaml . 
| nindent 4 }} {{- end }} metrics: - {{- toYaml (required "autoscaling.hpa.metrics is required when type is hpa" .Values.autoscaling.hpa.metrics) | nindent 4 }} + {{- toYaml $metrics | nindent 4 }} scaleTargetRef: {{- if .Values.rollout.enabled }} apiVersion: argoproj.io/v1alpha1 diff --git a/http-service/templates/rollout.yaml b/http-service/templates/rollout.yaml index 90278bd0..f9e5f008 100644 --- a/http-service/templates/rollout.yaml +++ b/http-service/templates/rollout.yaml @@ -17,6 +17,7 @@ spec: {{- with .Values.revisionHistoryLimit }} revisionHistoryLimit: {{ . }} {{- end }} + {{- $strategy := required "rollout.strategy is required when rollout.enabled is true" .Values.rollout.strategy }} strategy: - {{- toYaml (required "rollout.strategy is required when rollout.enabled is true" .Values.rollout.strategy) | nindent 4 }} + {{- toYaml $strategy | nindent 4 }} {{- end }} diff --git a/http-service/templates/scaled-object.yaml b/http-service/templates/scaled-object.yaml index 7bdb65c7..d2d5c90b 100644 --- a/http-service/templates/scaled-object.yaml +++ b/http-service/templates/scaled-object.yaml @@ -16,14 +16,16 @@ spec: kind: Deployment {{- end }} name: {{ include "http-service.fullname" . }} + {{- $keda := required "autoscaling.keda is required when type is keda" .Values.autoscaling.keda }} + {{- $triggers := required "autoscaling.keda.triggers is required when type is keda" $keda.triggers }} minReplicaCount: {{ required "autoscaling.minReplicas is required" .Values.autoscaling.minReplicas }} maxReplicaCount: {{ required "autoscaling.maxReplicas is required" .Values.autoscaling.maxReplicas }} - {{- with (required "autoscaling.keda is required when type is keda" .Values.autoscaling.keda).pollingInterval }} + {{- with $keda.pollingInterval }} pollingInterval: {{ . }} {{- end }} - {{- with .Values.autoscaling.keda.cooldownPeriod }} + {{- with $keda.cooldownPeriod }} cooldownPeriod: {{ . 
}} {{- end }} triggers: - {{- toYaml (required "autoscaling.keda.triggers is required when type is keda" .Values.autoscaling.keda.triggers) | nindent 4 }} + {{- toYaml $triggers | nindent 4 }} {{- end }}