
Commit 3666750

## [1.0.1] - 2025-03-16
### Added
- Kubernetes RBAC configuration
- Health check endpoints
  - Startup probe
  - Liveness probe
  - Readiness probe

### Changed
- Minikube test environment setup
- Health check endpoints (!!!CURRENTLY NOT USED!!!)
- Minikube RBAC configuration
1 parent 1555bf8 commit 3666750

File tree

8 files changed (+162, -47 lines)


CHANGELOG.md

Lines changed: 16 additions & 0 deletions
@@ -24,6 +24,22 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Security
 
+## [1.0.1] - 2025-03-16
+
+### Added
+
+- Kubernetes RBAC configuration
+- Health check endpoints
+  - Startup probe
+  - Liveness probe
+  - Readiness probe
+
+### Changed
+
+- Minikube test environment setup
+- Health check endpoints (!!!CURRENTLY NOT USED!!!)
+- Minikube RBAC configuration
+
 ## [1.0.0] - 2025-03-15
 
 ### Added

kubernetes/base/001-rbac.yaml

Lines changed: 41 additions & 0 deletions
@@ -0,0 +1,41 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: xdatabase-proxy-sa
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: xdatabase-proxy-role
+rules:
+  - apiGroups: [""]
+    resources: ["pods", "services", "endpoints", "secrets", "configmaps"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["create", "patch"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["list", "watch"]
+  - apiGroups: ["apps"]
+    resources: ["deployments", "daemonsets", "statefulsets", "replicasets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["networking.k8s.io"]
+    resources: ["ingresses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["metrics.k8s.io"]
+    resources: ["pods", "nodes"]
+    verbs: ["get", "list", "watch"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: xdatabase-proxy-role-binding
+subjects:
+  - kind: ServiceAccount
+    name: xdatabase-proxy-sa
+roleRef:
+  kind: ClusterRole
+  name: xdatabase-proxy-role
+  apiGroup: rbac.authorization.k8s.io
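
The ClusterRole above grants mostly read-only verbs (get/list/watch), plus create/patch on events. One quick way to confirm the binding took effect is a SelfSubjectAccessReview issued from inside the proxy pod; the sketch below is illustrative only and not part of this commit (it assumes it runs in-cluster under the xdatabase-proxy-sa service account).

```go
// rbac_check.go — a minimal sketch (not part of this commit) that asks the
// API server whether the pod's service account may list pods cluster-wide.
package main

import (
	"context"
	"fmt"
	"log"

	authv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatalf("in-cluster config: %v", err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatalf("clientset: %v", err)
	}

	// Ask "can I list pods?" using the credentials mounted into the pod.
	review := &authv1.SelfSubjectAccessReview{
		Spec: authv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authv1.ResourceAttributes{
				Verb:     "list",
				Resource: "pods",
			},
		},
	}
	resp, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().
		Create(context.Background(), review, metav1.CreateOptions{})
	if err != nil {
		log.Fatalf("access review: %v", err)
	}
	fmt.Printf("allowed=%v reason=%q\n", resp.Status.Allowed, resp.Status.Reason)
}
```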

kubernetes/base/deployment.yaml

Lines changed: 19 additions & 3 deletions
@@ -11,6 +11,7 @@ spec:
       labels:
         app: xdatabase-proxy
     spec:
+      serviceAccountName: xdatabase-proxy-sa
       containers:
         - name: xdatabase-proxy
           image: ghcr.io/hasirciogli/xdatabase-proxy:latest
@@ -24,13 +25,28 @@ spec:
             limits:
               cpu: 500m
               memory: 512Mi
-          livenessProbe:
+          startupProbe:
             tcpSocket:
               port: proxy-port
-            initialDelaySeconds: 5
+            failureThreshold: 30
             periodSeconds: 10
+          livenessProbe:
+            httpGet:
+              path: /healthz
+              port: proxy-port
+              scheme: HTTP
+            initialDelaySeconds: 15
+            periodSeconds: 20
+            timeoutSeconds: 5
+            failureThreshold: 3
+            successThreshold: 1
           readinessProbe:
-            tcpSocket:
+            httpGet:
+              path: /ready
               port: proxy-port
+              scheme: HTTP
             initialDelaySeconds: 5
             periodSeconds: 10
+            timeoutSeconds: 3
+            failureThreshold: 3
+            successThreshold: 1
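
The probe settings above give the proxy up to 30 × 10 s = 300 s for its port to open, then poll GET /healthz every 20 s with a 5 s timeout and GET /ready every 10 s with a 3 s timeout. As a quick manual check against a port-forwarded pod, a small Go sketch (not part of this commit; the localhost:8080 address is an assumption) could issue the same requests:

```go
// probe_check.go — a minimal sketch (not part of this commit) that issues the
// same HTTP GETs the probes above use, against a locally port-forwarded pod.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func check(path string, timeout time.Duration) {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get("http://localhost:8080" + path)
	if err != nil {
		fmt.Printf("%s: error: %v\n", path, err)
		return
	}
	defer resp.Body.Close()
	fmt.Printf("%s: %s\n", path, resp.Status)
}

func main() {
	check("/healthz", 5*time.Second) // livenessProbe: timeoutSeconds: 5
	check("/ready", 3*time.Second)   // readinessProbe: timeoutSeconds: 3
}
```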

kubernetes/base/kustomization.yaml

Lines changed: 1 addition & 0 deletions
@@ -2,6 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 
 resources:
+  - 001-rbac.yaml
   - deployment.yaml
   - service.yaml
   # - configmap.yaml

kubernetes/overlays/test/kustomization.yaml

Lines changed: 2 additions & 0 deletions
@@ -8,6 +8,8 @@ resources:
 
 patches:
   - path: replicas-patch.yaml
+  # - path: override-rbac.yaml
+  # - path: test-image-patch.yaml
 # configMapGenerator:
 #   - name: xdatabase-proxy-config
 #     behavior: merge
kubernetes/overlays/test/test-image-patch.yaml

Lines changed: 10 additions & 0 deletions
@@ -0,0 +1,10 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: xdatabase-proxy
+spec:
+  template:
+    spec:
+      containers:
+        - name: xdatabase-proxy
+          image: xdatabase-proxy-local-test:latest

main.go

Lines changed: 52 additions & 28 deletions
@@ -5,43 +5,61 @@ import (
 	"context"
 	"fmt"
 	"log"
+	"net/http"
 	"os"
 	"os/signal"
+	"sync/atomic"
 	"syscall"
 
 	"github.com/hasirciogli/xdatabase-proxy/pkg/kubernetes"
 )
 
-// func main() {
-// 	// Create proxies
-// 	postgresProxy := postgresql.NewPostgresProxy(3001, "localhost", 5432)
-// 	mysqlProxy := mysql.NewMySQLProxy(3002, "localhost", 3306)
-// 	mongoProxy := mongodb.NewMongoDBProxy(3003, "localhost", 27017)
-
-// 	// Start PostgreSQL proxy
-// 	go func() {
-// 		if err := postgresProxy.Start(postgresProxy.ListenPort); err != nil {
-// 			log.Printf("PostgreSQL proxy error: %v", err)
-// 		}
-// 	}()
-
-// 	// Start MySQL proxy
-// 	go func() {
-// 		if err := mysqlProxy.Start(mysqlProxy.ListenPort); err != nil {
-// 			log.Printf("MySQL proxy error: %v", err)
-// 		}
-// 	}()
-
-// 	// Start MongoDB proxy
-// 	if err := mongoProxy.Start(mongoProxy.ListenPort); err != nil {
-// 		log.Printf("MongoDB proxy error: %v", err)
-// 	}
-// }
-
-// sadece postgres proxy
+var (
+	isReady   atomic.Bool
+	isHealthy atomic.Bool
+)
+
+func setupHealthChecks() {
+	// Set initial state
+	isHealthy.Store(true)
+	isReady.Store(true)
+
+	// Health check endpoint
+	http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
+		if isHealthy.Load() {
+			w.WriteHeader(http.StatusOK)
+			w.Write([]byte("healthy"))
+			return
+		}
+		w.WriteHeader(http.StatusServiceUnavailable)
+		w.Write([]byte("unhealthy"))
+	})
+
+	// Readiness check endpoint
+	http.HandleFunc("/ready", func(w http.ResponseWriter, r *http.Request) {
+		if isReady.Load() {
+			w.WriteHeader(http.StatusOK)
+			w.Write([]byte("ready"))
+			return
+		}
+		w.WriteHeader(http.StatusServiceUnavailable)
+		w.Write([]byte("not ready"))
+	})
+
+	// Start HTTP server for health checks
+	go func() {
+		if err := http.ListenAndServe(":80", nil); err != nil {
+			log.Printf("Health check server error: %v", err)
+		}
+	}()
+}
+
 func main() {
+	// Setup health check endpoints (!!!CURRENTLY NOT USED!!!)
+	setupHealthChecks()
+
 	// Create a new Kubernetes client with specific context
-	contextName := os.Getenv("KUBE_CONTEXT") // Take context name from environment variable
+	contextName := os.Getenv("KUBE_CONTEXT")
 	if contextName == "" {
 		contextName = "local-test"
 	}
@@ -60,6 +78,8 @@ func main() {
 			log.Printf("Service Info: Name=%s, Namespace=%s, DB Type=%s, PooledConnection=%v, ClusterDNS=%s",
 				svc.Name, svc.Namespace, svc.DatabaseType, svc.PooledConnection, svc.ClusterDNS)
 		}
+		// Mark as ready once we've successfully started watching services
+		isReady.Store(true)
 	}); err != nil {
 		log.Fatalf("Failed to start watching: %v", err)
 	}
@@ -71,5 +91,9 @@ func main() {
 	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
 	<-sigChan
 
+	// Mark as not ready and unhealthy during shutdown
+	isReady.Store(false)
+	isHealthy.Store(false)
+
 	log.Println("Shutting down...")
 }
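
Because the new handlers are registered on the default mux, they can be exercised in a test without binding port 80. A hypothetical test sketch (the file and test names are assumptions; it lives in the same package as main.go so it can reach isHealthy and the registered mux):

```go
// main_health_test.go — a minimal sketch, not part of this commit.
package main

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestHealthzFollowsFlag(t *testing.T) {
	setupHealthChecks() // registers /healthz and /ready and marks both true

	req := httptest.NewRequest(http.MethodGet, "/healthz", nil)

	rec := httptest.NewRecorder()
	http.DefaultServeMux.ServeHTTP(rec, req)
	if rec.Code != http.StatusOK {
		t.Fatalf("want 200 while healthy, got %d", rec.Code)
	}

	isHealthy.Store(false)
	rec = httptest.NewRecorder()
	http.DefaultServeMux.ServeHTTP(rec, req)
	if rec.Code != http.StatusServiceUnavailable {
		t.Fatalf("want 503 while unhealthy, got %d", rec.Code)
	}
}
```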

scripts/setup-minikube.sh

Lines changed: 21 additions & 16 deletions
@@ -3,25 +3,30 @@
 # Exit on error
 set -e
 
-echo "Starting Minikube cluster..."
-minikube start --memory=4096 --cpus=2
-
-echo "Enabling ingress addon..."
-minikube addons enable ingress
-
-echo "Building Docker image..."
-eval $(minikube docker-env)
-docker build -t ghcr.io/hasirciogli/xdatabase-proxy:latest .
-
-echo "Creating namespaces..."
-kubectl create namespace test --dry-run=client -o yaml | kubectl apply -f -
-kubectl create namespace production --dry-run=client -o yaml | kubectl apply -f -
+echo "Starting Minikube cluster if not running..."
+if minikube status -p local-test | grep -q "Running"; then
+    echo "Minikube cluster already running"
+else
+    minikube start --memory=4096 --cpus=2 -p local-test
+fi
+
+# echo "Building Docker image..."
+# eval $(minikube docker-env -p local-test)
+# docker build -t xdatabase-proxy-local-test:latest .
+
+echo "Creating namespaces if not exists..."
+if minikube kubectl -p local-test -- get namespace test >/dev/null 2>&1; then
+    echo "Namespace test already exists"
+else
+    echo "Creating namespace test"
+    minikube kubectl -p local-test -- create namespace test --dry-run=client -o yaml | minikube kubectl -p local-test -- apply -f -
+fi
 
 echo "Deploying test environment..."
-kubectl kustomize kubernetes/overlays/test | kubectl apply -f -
+minikube kubectl -p local-test -- kustomize kubernetes/overlays/test | minikube kubectl -p local-test -- apply -f - -n test
 
 echo "Waiting for deployment to be ready..."
-kubectl -n test rollout status deployment/xdatabase-proxy
+minikube kubectl -p local-test -- rollout status deployment/xdatabase-proxy -n test
 
 echo "Running tests..."
 # Add your test commands here
@@ -32,4 +37,4 @@ echo "Running tests..."
 # pkill -f "port-forward"
 
 echo "Setup complete! Your test environment is ready."
-echo "To access the proxy service, run: kubectl -n test port-forward svc/xdatabase-proxy 3001:3001"
+echo "To access the proxy service, run: minikube kubectl -p local-test -- port-forward svc/xdatabase-proxy 3001:3001 -n test"
