Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .dockerignore
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,8 @@
.env
__pycache__/
*.pyc
dashboard/data/
workspace/
ADWs/logs/
ADWs/__pycache__/
.claude/agent-memory/
Expand Down
8 changes: 8 additions & 0 deletions Dockerfile.dashboard
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,14 @@ COPY dashboard/backend/ dashboard/backend/
COPY social-auth/ social-auth/
COPY scheduler.py ./

# Copy workspace assets the backend reads at runtime.
# Without these, /api/agents, /api/skills, /api/commands etc. all return empty
# and the UI shows "No agents found" / "No skills found" on a fresh deploy.
# .claude/agent-memory and .claude/.env are excluded by .dockerignore so user
# data and secrets stay out of the image.
COPY .claude/ .claude/
COPY docs/ docs/

# Copy built frontend from stage 1
COPY --from=frontend-build /frontend/dist dashboard/frontend/dist

Expand Down
18 changes: 17 additions & 1 deletion dashboard/terminal-server/src/claude-bridge.js
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ class ClaudeBridge {
_loadProviderConfig() {
const ALLOWED_CLI = new Set(['claude', 'openclaude']);
const ALLOWED_VARS = new Set([
'ANTHROPIC_API_KEY',
'CLAUDE_CODE_USE_OPENAI', 'CLAUDE_CODE_USE_GEMINI',
'CLAUDE_CODE_USE_BEDROCK', 'CLAUDE_CODE_USE_VERTEX',
'OPENAI_BASE_URL', 'OPENAI_API_KEY', 'OPENAI_MODEL',
Expand Down Expand Up @@ -138,7 +139,22 @@ class ClaudeBridge {

async startSession(sessionId, options = {}) {
if (this.sessions.has(sessionId)) {
throw new Error(`Session ${sessionId} already exists`);
const existing = this.sessions.get(sessionId);
if (existing.active) {
// Idempotent: a duplicate startSession can arrive when the WebSocket
// reconnects through a reverse proxy (Traefik) and the frontend
// re-sends start_claude before learning the session is still alive.
// Returning the existing session instead of throwing prevents a
// confusing "Session already exists" toast on the user's terminal
// while keeping the original PTY intact.
console.log(`[bridge] startSession(${sessionId}) — already active, returning existing session`);
return existing;
}
// Orphaned dead session — clean up and restart
if (existing.process) {
try { existing.process.kill('SIGKILL'); } catch (_) {}
}
this.sessions.delete(sessionId);
}

const {
Expand Down
6 changes: 5 additions & 1 deletion dashboard/terminal-server/src/server.js
Original file line number Diff line number Diff line change
Expand Up @@ -778,7 +778,11 @@ class TerminalServer {
if (!session) return;

if (session.active) {
this.sendToWebSocket(wsInfo.ws, { type: 'error', message: 'An agent is already running in this session' });
// Frontend may re-send start_claude on WebSocket reconnect (common
// through reverse proxies like Traefik). The session is already
// running — replay the buffer and tell the client it's attached
// instead of surfacing a misleading error toast.
this.sendToWebSocket(wsInfo.ws, { type: 'claude_started', sessionId: wsInfo.claudeSessionId });
return;
}

Expand Down
13 changes: 8 additions & 5 deletions docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,25 +3,27 @@ services:
dashboard:
build:
context: .
dockerfile: Dockerfile.dashboard
dockerfile: Dockerfile.swarm.dashboard
container_name: evonexus-dashboard
ports:
- "${EVONEXUS_PORT:-8080}:8080"
- "8081:8080"
- "32352:32352"
env_file: .env
environment:
- TZ=America/Sao_Paulo
- EVONEXUS_PORT=8080
- TERMINAL_SERVER_PORT=32352
volumes:
- ./.env:/workspace/.env:ro
- ./config:/workspace/config:ro
- claude-auth:/root/.claude
- ./config:/workspace/config
- ./workspace:/workspace/workspace
- ./dashboard/data:/workspace/dashboard/data
- ./.claude/agents:/workspace/.claude/agents:ro
- ./.claude/skills:/workspace/.claude/skills:ro
- ./.claude/commands:/workspace/.claude/commands:ro
- ./.claude/templates:/workspace/.claude/templates:ro
- ./memory:/workspace/memory:ro
- ./ADWs/logs:/workspace/ADWs/logs:ro
- ./ADWs/logs:/workspace/ADWs/logs
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/api/version"]
Expand Down Expand Up @@ -79,6 +81,7 @@ services:
- manual

volumes:
claude-auth:
daily-logs:
projects:
community:
Expand Down
166 changes: 166 additions & 0 deletions evonexus.portainer.stack.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,166 @@
version: "3.8"

# ============================================================
# EvoNexus - Production stack (Docker Swarm / Portainer)
#
# Based on the project's official documentation:
# - README.swarm.md
# - evonexus.stack.yml
#
# Adjusted for this server:
# - Domain: evonexus.advancedbot.com.br
# - Traefik network: network_public
# - Traefik entrypoint: websecure
# - Certresolver: letsencryptresolver
#
# Before deploying:
# 1. Make sure the `network_public` network already exists in the Swarm
# 2. The images published on Docker Hub use the latest tag:
#    - marcelolealhub/evo-nexus-dashboard:latest
#    - marcelolealhub/evo-nexus-runtime:latest
# ============================================================

services:

  # Web dashboard (Flask backend + terminal server), routed through Traefik.
  evonexus_dashboard:
    image: marcelolealhub/evo-nexus-dashboard:latest

    # Named volumes so config, workspace data, logs, agent memory and
    # CLI auth state survive stack redeploys.
    volumes:
      - evonexus_config:/workspace/config
      - evonexus_workspace:/workspace/workspace
      - evonexus_dashboard_data:/workspace/dashboard/data
      - evonexus_memory:/workspace/memory
      - evonexus_adw_logs:/workspace/ADWs/logs
      - evonexus_agent_memory:/workspace/.claude/agent-memory
      - evonexus_claude_auth:/root/.claude
      - evonexus_codex_auth:/root/.codex

    networks:
      - network_public

    environment:
      - TZ=America/Sao_Paulo
      - EVONEXUS_PORT=8080
      - TERMINAL_SERVER_PORT=32352
      - FORWARDED_ALLOW_IPS=*

    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.role == manager
      resources:
        limits:
          cpus: "1"
          memory: 1024M
      # Traefik labels sit under deploy.labels (service labels), as required
      # when Traefik discovers services via the Swarm provider.
      labels:
        - traefik.enable=true
        - traefik.docker.network=network_public

        # Main dashboard router -> container port 8080.
        - traefik.http.routers.evonexus.rule=Host(`evonexus.advancedbot.com.br`)
        - traefik.http.routers.evonexus.entrypoints=websecure
        - traefik.http.routers.evonexus.priority=1
        - traefik.http.routers.evonexus.tls.certresolver=letsencryptresolver
        - traefik.http.routers.evonexus.service=evonexus
        - traefik.http.services.evonexus.loadbalancer.server.port=8080
        - traefik.http.services.evonexus.loadbalancer.passHostHeader=true

        # Terminal WebSocket router: /terminal/* -> container port 32352,
        # with the /terminal prefix stripped before forwarding. Higher
        # priority (10 > 1) so it wins over the catch-all dashboard router.
        - traefik.http.routers.evonexus-terminal.rule=Host(`evonexus.advancedbot.com.br`) && PathPrefix(`/terminal`)
        - traefik.http.routers.evonexus-terminal.entrypoints=websecure
        - traefik.http.routers.evonexus-terminal.priority=10
        - traefik.http.routers.evonexus-terminal.tls.certresolver=letsencryptresolver
        - traefik.http.routers.evonexus-terminal.service=evonexus-terminal
        - traefik.http.routers.evonexus-terminal.middlewares=evonexus-terminal-strip
        - traefik.http.middlewares.evonexus-terminal-strip.stripprefix.prefixes=/terminal
        - traefik.http.services.evonexus-terminal.loadbalancer.server.port=32352
        - traefik.http.services.evonexus-terminal.loadbalancer.passHostHeader=true

  # Telegram channel runtime (interactive CLI; shares auth/config volumes
  # with the dashboard service).
  evonexus_telegram:
    image: marcelolealhub/evo-nexus-runtime:latest
    command:
      - "claude"
      - "--channels"
      - "plugin:telegram@claude-plugins-official"
      - "--dangerously-skip-permissions"

    volumes:
      - evonexus_config:/workspace/config
      - evonexus_workspace:/workspace/workspace
      - evonexus_memory:/workspace/memory
      - evonexus_adw_logs:/workspace/ADWs/logs
      - evonexus_agent_memory:/workspace/.claude/agent-memory
      - evonexus_claude_auth:/root/.claude
      - evonexus_codex_auth:/root/.codex

    networks:
      - network_public

    environment:
      - TZ=America/Sao_Paulo
      - REQUIRE_ANTHROPIC_KEY=1

    # Keep stdin/tty open for the interactive CLI process.
    stdin_open: true
    tty: true

    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.role == manager
      resources:
        limits:
          cpus: "1"
          memory: 1024M

  # Scheduler runtime (runs scheduler.py; same shared volumes).
  evonexus_scheduler:
    image: marcelolealhub/evo-nexus-runtime:latest
    command: ["uv", "run", "python", "scheduler.py"]

    volumes:
      - evonexus_config:/workspace/config
      - evonexus_workspace:/workspace/workspace
      - evonexus_memory:/workspace/memory
      - evonexus_adw_logs:/workspace/ADWs/logs
      - evonexus_agent_memory:/workspace/.claude/agent-memory
      - evonexus_claude_auth:/root/.claude
      - evonexus_codex_auth:/root/.codex

    networks:
      - network_public

    environment:
      - TZ=America/Sao_Paulo
      - REQUIRE_ANTHROPIC_KEY=1

    deploy:
      mode: replicated
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.role == manager
      resources:
        limits:
          cpus: "1"
          memory: 1024M

volumes:
  evonexus_config:
  evonexus_workspace:
  evonexus_dashboard_data:
  evonexus_memory:
  evonexus_adw_logs:
  evonexus_agent_memory:
  evonexus_claude_auth:
  evonexus_codex_auth:

networks:
  # Pre-existing Traefik overlay network; must be created before deploy.
  network_public:
    external: true
3 changes: 3 additions & 0 deletions evonexus.stack.yml
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ services:
- evonexus_memory:/workspace/memory
- evonexus_adw_logs:/workspace/ADWs/logs
- evonexus_agent_memory:/workspace/.claude/agent-memory
- evonexus_claude_auth:/root/.claude
- evonexus_codex_auth:/root/.codex

networks:
Expand Down Expand Up @@ -96,6 +97,7 @@ services:
- evonexus_memory:/workspace/memory
- evonexus_adw_logs:/workspace/ADWs/logs
- evonexus_agent_memory:/workspace/.claude/agent-memory
- evonexus_claude_auth:/root/.claude
- evonexus_codex_auth:/root/.codex

networks:
Expand Down Expand Up @@ -155,6 +157,7 @@ volumes:
evonexus_memory:
evonexus_adw_logs:
evonexus_agent_memory:
evonexus_claude_auth:
evonexus_codex_auth:

networks:
Expand Down
53 changes: 53 additions & 0 deletions start-dashboard.sh
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,59 @@ FLASK_PORT="${EVONEXUS_PORT:-8080}"

echo "[start-dashboard] terminal-server on :${TERMINAL_PORT}, Flask on :${FLASK_PORT}"

# ----------------------------------------------------------------------------
# One-time seeding of Claude Code's global settings file.
#
# Every agent terminal runs in its own working directory, which Claude Code
# treats as a distinct project; without a pre-seeded settings.json the
# theme/onboarding wizard would appear in each new agent terminal. The file
# is only written when absent so any choices the user has made persist.
# ----------------------------------------------------------------------------
mkdir -p /root/.claude
[ -f /root/.claude/settings.json ] || {
  echo "[start-dashboard] seeding /root/.claude/settings.json with default theme"
  cat > /root/.claude/settings.json <<'EOF'
{
  "theme": "dark",
  "hasCompletedOnboarding": true,
  "hasSeenWelcome": true,
  "telemetry": false
}
EOF
}

# ----------------------------------------------------------------------------
# Recover /root/.claude.json after a redeploy.
#
# Claude Code keeps its main config (theme, OAuth tokens, per-project state)
# in /root/.claude.json — next to, not inside, /root/.claude/. Only the
# directory is volume-mounted in Swarm, so the file itself lands in the
# container's writable layer and disappears on each redeploy, bringing the
# onboarding prompts back.
#
# Claude Code writes timestamped backups under /root/.claude/backups/ (which
# IS persisted), so when the file is missing we restore the newest backup;
# failing that, we seed a minimal config that suppresses the first-run flow.
# ----------------------------------------------------------------------------
if [ ! -f /root/.claude.json ]; then
  # `ls -t` sorts newest-first; errors (e.g. no backups dir) are discarded.
  newest=$(ls -t /root/.claude/backups/.claude.json.backup.* 2>/dev/null | head -n1 || true)
  # `[ -f "" ]` is false, so a single -f test covers the empty/unset case too.
  if [ -f "${newest:-}" ]; then
    echo "[start-dashboard] restoring /root/.claude.json from ${newest}"
    cp "${newest}" /root/.claude.json
  else
    echo "[start-dashboard] seeding minimal /root/.claude.json (no backup found)"
    cat > /root/.claude.json <<'EOF'
{
  "theme": "dark",
  "hasCompletedOnboarding": true,
  "hasSeenWelcome": true,
  "bypassPermissionsModeAccepted": true,
  "telemetry": false
}
EOF
  fi
fi

# Start terminal-server in the background
node /workspace/dashboard/terminal-server/bin/server.js --port "${TERMINAL_PORT}" &
TERMINAL_PID=$!
Expand Down