forked from kubestellar/ui
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdocker-compose.yml
More file actions
258 lines (240 loc) · 7.8 KB
/
docker-compose.yml
File metadata and controls
258 lines (240 loc) · 7.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
services:
  # React/Vite frontend served by nginx (port 80 in-container).
  frontend:
    build:
      context: .
      dockerfile: frontend/Dockerfile
      args:
        - VITE_BASE_URL=${VITE_BASE_URL:-http://localhost:4000}
        - VITE_SKIP_PREREQUISITES_CHECK=${VITE_SKIP_PREREQUISITES_CHECK:-true}
        - VITE_APP_VERSION=${VITE_APP_VERSION:-0.1.0}
    ports:
      - '${FRONTEND_PORT:-5173}:80'
    depends_on:
      backend:
        condition: service_healthy
    environment:
      # VITE_* values are baked in at build time; repeated here for runtime
      # consumers (e.g. an entrypoint that templates nginx config).
      - VITE_SKIP_PREREQUISITES_CHECK=${VITE_SKIP_PREREQUISITES_CHECK:-true}
      - VITE_BASE_URL=${VITE_BASE_URL:-http://localhost:4000}
      - NGINX_HOST=${NGINX_HOST:-localhost}
      - BACKEND_URL=${BACKEND_URL:-http://localhost:4000}
    healthcheck:
      # NOTE(review): requires curl inside the frontend image — confirm the
      # Dockerfile installs it, otherwise this check always fails.
      test: ["CMD", "curl", "-f", "http://localhost"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    restart: unless-stopped
    networks:
      - kubestellar-network

  # Go API backend. Runs with host networking so it can reach host-published
  # postgres (127.0.0.1:5400) and redis (127.0.0.1:6379) and any local kube API.
  # network_mode: host is mutually exclusive with `networks:`/`ports:`.
  backend:
    build:
      context: ./backend
      dockerfile: Dockerfile
    network_mode: host
    volumes:
      - ~/.kube:/root/.kube:ro
      - ./backend/postgresql/migrations:/app/postgresql/migrations:ro
      - ./backend/plugins:/app/plugins
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_healthy
    environment:
      # Application Configuration
      - PORT=${BACKEND_PORT:-4000}
      - GIN_MODE=${GIN_MODE:-release}
      # SECURITY: committed default secret — always override in production.
      - JWT_SECRET=${JWT_SECRET:-your-super-secret-jwt-key-change-this-in-production}
      # Service Configuration
      - SERVICE_NAME=${SERVICE_NAME:-kubestellar-ui}
      - SERVICE_VERSION=${SERVICE_VERSION:-1.0.0}
      - ENVIRONMENT=${ENVIRONMENT:-production}
      # Database Configuration (credentials match the postgres service below;
      # 127.0.0.1:5400 is the host-published port, reachable via host network)
      - DATABASE_URL=postgres://authuser:authpass123@127.0.0.1:5400/authdbui?sslmode=disable
      - DB_HEALTH_TIMEOUT=${DB_HEALTH_TIMEOUT:-5s}
      # Redis Configuration — use the same REDIS_PORT variable the redis
      # service publishes, so overriding it keeps both sides in sync.
      - REDIS_HOST=127.0.0.1
      - REDIS_PORT=${REDIS_PORT:-6379}
      - REDIS_HEALTH_TIMEOUT=${REDIS_HEALTH_TIMEOUT:-3s}
      # Health Check Configuration
      - ENABLE_HEALTH_ENDPOINTS=${ENABLE_HEALTH_ENDPOINTS:-true}
      - ENABLE_METRICS=${ENABLE_METRICS:-true}
      - HEALTH_ENDPOINT=${HEALTH_ENDPOINT:-/health}
      - LIVENESS_ENDPOINT=${LIVENESS_ENDPOINT:-/healthz}
      - READINESS_ENDPOINT=${READINESS_ENDPOINT:-/readyz}
      - METRICS_ENDPOINT=${METRICS_ENDPOINT:-/api/v1/metrics}
      - HEALTH_CHECK_TIMEOUT=${HEALTH_CHECK_TIMEOUT:-10s}
      - HEALTH_COMPONENTS=${HEALTH_COMPONENTS:-database,redis,memory,disk}
      # Performance Configuration
      - MEMORY_THRESHOLD=${MEMORY_THRESHOLD:-85.0}
      - DISK_THRESHOLD=${DISK_THRESHOLD:-90.0}
      - DISK_PATH=${DISK_PATH:-/}
      # CORS Configuration
      - CORS_ALLOWED_ORIGIN=${CORS_ALLOWED_ORIGIN:-http://localhost:5173}
      # Plugins Configuration
      - PLUGINS_DIRECTORY=${PLUGINS_DIRECTORY:-/app/plugins}
      # GitHub Configuration
      - STORAGE_PROVIDER=${STORAGE_PROVIDER:-git}
      - GIT_REMOTE_URL=${GIT_REMOTE_URL:-https://github.com/username/reponame.git}
      - GIT_BRANCH=${GIT_BRANCH:-main}
      - GIT_BASE_URL=${GIT_BASE_URL:-https://raw.githubusercontent.com/username/reponame/main}
      # SECURITY: placeholder token — supply a real token via the environment,
      # never commit one here.
      - GIT_TOKEN=${GIT_TOKEN:-YOUR-ACCESS-TOKEN}
    healthcheck:
      # Use the optimized health endpoint. Interpolate the same BACKEND_PORT
      # default as PORT above so an override doesn't break the check
      # (compose expands ${...} at parse time).
      test: ["CMD", "curl", "-f", "http://localhost:${BACKEND_PORT:-4000}/health"]
      interval: 30s
      timeout: 15s
      retries: 5
      start_period: 60s
    restart: unless-stopped

  # PostgreSQL for auth data; published on host port 5400 for the
  # host-networked backend and postgres-exporter.
  postgres:
    image: postgres:15-alpine
    container_name: auth_postgres
    environment:
      POSTGRES_DB: authdbui
      POSTGRES_USER: authuser
      # SECURITY: committed default password — for local development only.
      POSTGRES_PASSWORD: authpass123
      # Performance optimizations
      # NOTE(review): POSTGRES_MAX_CONNECTIONS / POSTGRES_SHARED_BUFFERS /
      # POSTGRES_EFFECTIVE_CACHE_SIZE are not standard postgres-image
      # variables — confirm an entrypoint consumes them, otherwise pass
      # them via `command: postgres -c ...` instead.
      POSTGRES_SHARED_PRELOAD_LIBRARIES: pg_stat_statements
      POSTGRES_MAX_CONNECTIONS: 100
      POSTGRES_SHARED_BUFFERS: 256MB
      POSTGRES_EFFECTIVE_CACHE_SIZE: 1GB
    ports:
      - '5400:5432'
    volumes:
      - postgres_data:/var/lib/postgresql/data
      # - ./backend/postgresql/migrations:/docker-entrypoint-initdb.d:ro
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U authuser -d authdbui"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 30s
    restart: unless-stopped
    networks:
      - kubestellar-network
    # Resource limits for better stability
    deploy:
      resources:
        limits:
          memory: 512M
        reservations:
          memory: 256M

  # Redis cache; published on host port ${REDIS_PORT:-6379} for the backend.
  redis:
    image: '${REDIS_IMAGE:-redis:7-alpine}'
    container_name: '${REDIS_CONTAINER_NAME:-kubestellar-redis}'
    ports:
      - '${REDIS_PORT:-6379}:6379'
    environment:
      # NOTE(review): the official redis image does not read these variables;
      # effective settings come from the mounted redis.conf below. Kept for
      # documentation/compatibility — verify redis.conf matches these values.
      - REDIS_APPENDONLY=yes
      - REDIS_MAXMEMORY=256mb
      - REDIS_MAXMEMORY_POLICY=allkeys-lru
    volumes:
      - redis_data:/data
      - ./redis/redis.conf:/usr/local/etc/redis/redis.conf:ro
    command: redis-server /usr/local/etc/redis/redis.conf
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 10s
    restart: unless-stopped
    networks:
      - kubestellar-network
    # Resource limits
    deploy:
      resources:
        limits:
          memory: 512M
        reservations:
          memory: 128M

  # Prometheus - for metrics collection (enable with `--profile monitoring`)
  prometheus:
    image: prom/prometheus:latest
    container_name: kubestellar-prometheus
    network_mode: host
    volumes:
      - ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--web.enable-lifecycle'
      # Non-default port to avoid clashing with other host services.
      - '--web.listen-address=0.0.0.0:19090'
    restart: unless-stopped
    profiles:
      - monitoring

  # Grafana - for visualization (host network; listens on 13000)
  grafana:
    image: grafana/grafana:latest
    container_name: kubestellar-grafana
    network_mode: host
    volumes:
      - grafana_data:/var/lib/grafana
      - ./monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro
      - ./monitoring/grafana/datasources:/etc/grafana/provisioning/datasources:ro
    environment:
      - GF_SECURITY_ALLOW_EMBEDDING=true
      # SECURITY: default admin/admin — override for anything non-local.
      - GF_SECURITY_ADMIN_PASSWORD=${GRAFANA_PASSWORD:-admin}
      - GF_SECURITY_ADMIN_USER=${GRAFANA_USER:-admin}
      - GF_USERS_ALLOW_SIGN_UP=false
      - GF_SERVER_HTTP_PORT=13000
    depends_on:
      - prometheus
    restart: unless-stopped
    profiles:
      - monitoring

  # Node Exporter for system metrics (optional)
  node-exporter:
    image: prom/node-exporter:latest
    container_name: kubestellar-node-exporter
    network_mode: host
    volumes:
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - /:/rootfs:ro
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      # $$ escapes compose interpolation so the regex keeps a literal $.
      - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
    restart: unless-stopped
    profiles:
      - monitoring

  # PostgreSQL Exporter for database metrics (optional)
  postgres-exporter:
    image: prometheuscommunity/postgres-exporter:latest
    container_name: postgres-exporter
    environment:
      # localhost:5400 works because of network_mode: host below.
      DATA_SOURCE_NAME: "postgresql://authuser:authpass123@localhost:5400/authdbui?sslmode=disable"
    network_mode: host
    depends_on:
      postgres:
        condition: service_healthy
    restart: unless-stopped
    profiles:
      - monitoring

  # Redis Exporter for Redis metrics (optional)
  redis-exporter:
    image: oliver006/redis_exporter:latest
    container_name: redis-exporter
    environment:
      REDIS_ADDR: "redis://localhost:6379"
    network_mode: host
    depends_on:
      redis:
        condition: service_healthy
    restart: unless-stopped
    profiles:
      - monitoring

volumes:
  postgres_data: {}
  redis_data: {}
  prometheus_data: {}
  grafana_data: {}

networks:
  kubestellar-network:
    driver: bridge