From f899063757f93fd3a58d1361b468bce623e85ccc Mon Sep 17 00:00:00 2001 From: Rodaddy Date: Mon, 9 Mar 2026 22:51:29 -0400 Subject: [PATCH 1/3] feat: network deployment with TCP binding, auth, metrics, and remote client Add TCP listen mode (MCP2CLI_LISTEN_HOST/PORT) so the daemon can serve multiple machines over HTTP. Bearer token auth with timing-safe comparison protects authenticated endpoints while /health and /metrics remain open. Prometheus metrics at GET /metrics expose request counts, latency histograms, connection pool state, memory usage, and auth failures per service/tool. Remote client mode (MCP2CLI_REMOTE_URL) lets the CLI connect to a remote daemon instead of starting a local one. Includes a bash wrapper (scripts/mcp2cli-remote) for curl-only machines. LXC deployment files in deploy/ with systemd unit, env template, services config, and setup script. Ansible playbook lives in the infrastructure repo. Fixes idle timer firing immediately when timeoutMs=0 (TCP mode) by guarding touch() against zero/negative values. 
Co-Authored-By: Claude Opus 4.6 --- README.md | 148 ++++++++++++++++++++ deploy/env.example | 33 +++++ deploy/mcp2cli.service | 21 +++ deploy/services-server.json | 31 +++++ deploy/setup.sh | 142 +++++++++++++++++++ examples/services-server.json | 30 ++++ scripts/mcp2cli-remote | 79 +++++++++++ src/cli/commands/daemon.ts | 23 +++- src/daemon/auth.ts | 78 +++++++++++ src/daemon/idle.ts | 3 + src/daemon/index.ts | 83 ++++++++---- src/daemon/metrics.ts | 203 ++++++++++++++++++++++++++++ src/daemon/paths.ts | 33 ++++- src/daemon/server.ts | 44 +++++- src/daemon/types.ts | 16 +++ src/process/client.ts | 110 +++++++-------- src/process/index.ts | 1 + src/process/liveness.ts | 26 ++++ src/types/index.ts | 1 + tests/daemon/auth.test.ts | 90 ++++++++++++ tests/daemon/observability.test.ts | 5 +- tests/process/client-remote.test.ts | 112 +++++++++++++++ 22 files changed, 1218 insertions(+), 94 deletions(-) create mode 100644 deploy/env.example create mode 100644 deploy/mcp2cli.service create mode 100644 deploy/services-server.json create mode 100755 deploy/setup.sh create mode 100644 examples/services-server.json create mode 100755 scripts/mcp2cli-remote create mode 100644 src/daemon/auth.ts create mode 100644 src/daemon/metrics.ts create mode 100644 tests/daemon/auth.test.ts create mode 100644 tests/process/client-remote.test.ts diff --git a/README.md b/README.md index 5fa3612..a330260 100644 --- a/README.md +++ b/README.md @@ -422,6 +422,154 @@ In addition to the variables listed above, v1.3 adds: |----------|---------|-------------| | `MCP2CLI_CACHE_DIR` | `~/.cache/mcp2cli` | Base directory for schema cache and circuit breaker state | +## Network Deployment + +mcp2cli can run as a centralized TCP daemon, allowing multiple machines to share a single set of MCP server connections. Install and configure MCP backends once on a server, then connect from any machine using the CLI client or the bash wrapper (curl + jq only -- no Bun required). 
+ +### Quick Start (TCP Mode) + +**Server** -- start the daemon with TCP binding: + +```bash +export MCP2CLI_LISTEN_HOST=0.0.0.0 +export MCP2CLI_LISTEN_PORT=9500 +export MCP2CLI_AUTH_TOKEN=$(openssl rand -hex 32) +MCP2CLI_DAEMON=1 mcp2cli +``` + +**Client** -- point any machine at the remote daemon: + +```bash +export MCP2CLI_REMOTE_URL=http://mcp-server.local:9500 +export MCP2CLI_AUTH_TOKEN=<token-from-server> +mcp2cli n8n n8n_list_workflows --params '{}' +``` + +When `MCP2CLI_REMOTE_URL` is set, the CLI skips local daemon startup entirely and sends requests directly over HTTP. + +### Network Environment Variables + +In addition to the [base environment variables](#environment-variables), network mode adds: + +| Variable | Default | Description | +|----------|---------|-------------| +| `MCP2CLI_LISTEN_HOST` | (unset) | Bind address for TCP mode. Setting this enables TCP instead of Unix socket. Use `0.0.0.0` to listen on all interfaces | +| `MCP2CLI_LISTEN_PORT` | `9500` | TCP port when `MCP2CLI_LISTEN_HOST` is set | +| `MCP2CLI_AUTH_TOKEN` | (unset) | Bearer token for TCP authentication. Required for production deployments | +| `MCP2CLI_REMOTE_URL` | (unset) | URL of remote mcp2cli daemon (e.g. `http://mcp-server:9500`). Enables remote client mode | +| `MCP2CLI_CONFIG` | `~/.config/mcp2cli/services.json` | Path to service definitions (useful for server-side config in `/etc/mcp2cli/`) | + +### Authentication + +When `MCP2CLI_AUTH_TOKEN` is set on the server, all requests must include a `Bearer` token in the `Authorization` header. The token comparison uses timing-safe equality to prevent timing attacks. + +**Auth-exempt paths** -- these skip authentication so load balancers and monitoring can probe without credentials: +- `GET /health` -- health check with uptime, memory, and pool status +- `GET /metrics` -- Prometheus metrics endpoint + +### Prometheus Metrics + +The daemon exposes metrics at `GET /metrics` in Prometheus text exposition format. 
Key metrics: + +| Metric | Type | Description | +|--------|------|-------------| +| `mcp2cli_requests_total` | counter | Total requests by `{service, tool}` | +| `mcp2cli_requests_errors_total` | counter | Failed requests by `{service, tool}` | +| `mcp2cli_request_duration_ms` | histogram | Request latency with buckets (10ms - 30s) | +| `mcp2cli_requests_active` | gauge | Currently in-flight requests | +| `mcp2cli_pool_connections_active` | gauge | Current connection pool size | +| `mcp2cli_pool_services` | gauge | Connected services (`{service}` label) | +| `mcp2cli_connection_events_total` | counter | Connect/disconnect/health-check-failure by `{service}` | +| `mcp2cli_auth_failures_total` | counter | Total authentication failures | +| `mcp2cli_process_uptime_seconds` | gauge | Daemon uptime | +| `mcp2cli_process_memory_rss_bytes` | gauge | Resident set size | + +Add to your Prometheus config: + +```yaml +scrape_configs: + - job_name: mcp2cli + static_configs: + - targets: ['mcp-server.local:9500'] +``` + +### Bash Wrapper (curl-only clients) + +For machines that only have `curl` and `jq` (no Bun runtime), use the bash wrapper: + +```bash +# Install the wrapper +cp scripts/mcp2cli-remote /usr/local/bin/ +chmod +x /usr/local/bin/mcp2cli-remote + +# Configure +export MCP2CLI_REMOTE_URL=http://mcp-server.local:9500 +export MCP2CLI_AUTH_TOKEN= + +# Use it like the full CLI +mcp2cli-remote n8n n8n_list_workflows '{}' +``` + +### LXC Deployment + +The `deploy/` directory contains everything needed to run mcp2cli as a systemd service in an LXC container (or any Linux host): + +| File | Purpose | +|------|---------| +| `deploy/mcp2cli.service` | systemd unit file (hardened with `NoNewPrivileges`, `ProtectSystem=strict`) | +| `deploy/env.example` | Environment file template -- copy to `/etc/mcp2cli/env` | +| `deploy/services-server.json` | Example server-side service config | + +Setup: + +```bash +# Copy files into place +cp deploy/mcp2cli.service /etc/systemd/system/ 
+mkdir -p /etc/mcp2cli +cp deploy/env.example /etc/mcp2cli/env +cp deploy/services-server.json /etc/mcp2cli/services.json + +# Edit config +vim /etc/mcp2cli/env # set MCP2CLI_AUTH_TOKEN +vim /etc/mcp2cli/services.json # configure your MCP backends + +# Enable and start +useradd --system --no-create-home mcp2cli +systemctl daemon-reload +systemctl enable --now mcp2cli +``` + +### curl Examples + +```bash +SERVER=http://mcp-server.local:9500 +TOKEN=your-token-here + +# Health check (no auth required) +curl -s $SERVER/health | jq . + +# Prometheus metrics (no auth required) +curl -s $SERVER/metrics + +# List tools for a service +curl -s -X POST $SERVER/list-tools \ + -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"service": "n8n"}' | jq . + +# Invoke a tool +curl -s -X POST $SERVER/call \ + -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"service": "n8n", "tool": "n8n_list_workflows", "params": {}}' | jq . + +# Get a tool schema +curl -s -X POST $SERVER/schema \ + -H "Authorization: Bearer $TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"service": "n8n", "tool": "n8n_list_workflows"}' | jq . 
+``` + ## Development ```bash diff --git a/deploy/env.example b/deploy/env.example new file mode 100644 index 0000000..a66a9ed --- /dev/null +++ b/deploy/env.example @@ -0,0 +1,33 @@ +# mcp2cli Daemon Configuration +# Copy to /etc/mcp2cli/env and edit values + +# Enable daemon mode (required) +MCP2CLI_DAEMON=1 + +# Network binding -- listen on all interfaces +MCP2CLI_LISTEN_HOST=0.0.0.0 +MCP2CLI_LISTEN_PORT=9500 + +# Authentication token for TCP connections +# Generate a strong token and store in vaultwarden +# Example: openssl rand -hex 32 +MCP2CLI_AUTH_TOKEN=CHANGE_ME_USE_VAULTWARDEN + +# Path to service definitions +MCP2CLI_CONFIG=/etc/mcp2cli/services.json + +# Logging level: debug, info, warn, error +MCP2CLI_LOG_LEVEL=info + +# Idle timeout in seconds before shutting down inactive backends +# Defaults to 0 (disabled) in TCP mode -- backends stay alive +# MCP2CLI_IDLE_TIMEOUT=0 + +# Maximum concurrent backend processes in the pool +# MCP2CLI_POOL_MAX=10 + +# Per-tool invocation timeout in seconds +# MCP2CLI_TOOL_TIMEOUT=30 + +# Directory for schema and response caching +# MCP2CLI_CACHE_DIR=/var/lib/mcp2cli/cache diff --git a/deploy/mcp2cli.service b/deploy/mcp2cli.service new file mode 100644 index 0000000..0ba55e4 --- /dev/null +++ b/deploy/mcp2cli.service @@ -0,0 +1,21 @@ +[Unit] +Description=mcp2cli MCP Bridge Daemon +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +User=mcp2cli +Group=mcp2cli +EnvironmentFile=/etc/mcp2cli/env +ExecStart=/usr/local/bin/mcp2cli +Restart=on-failure +RestartSec=5 +NoNewPrivileges=true +ProtectSystem=strict +ProtectHome=true +ReadWritePaths=/var/lib/mcp2cli /var/log/mcp2cli +PrivateTmp=true + +[Install] +WantedBy=multi-user.target diff --git a/deploy/services-server.json b/deploy/services-server.json new file mode 100644 index 0000000..ad98115 --- /dev/null +++ b/deploy/services-server.json @@ -0,0 +1,31 @@ +{ + "services": { + "n8n": { + "backend": "stdio", + "command": "npx", + "args": ["-y", 
"@n8n/n8n-mcp"], + "env": { + "N8N_HOST": "http://10.71.20.30:5678", + "N8N_API_KEY": "${N8N_API_KEY}" + } + }, + "vaultwarden-secrets": { + "backend": "http", + "url": "http://10.71.20.14:3001/mcp" + }, + "notebooklm-mcp": { + "backend": "stdio", + "command": "uvx", + "args": ["notebooklm-mcp"], + "env": {} + }, + "homekit": { + "backend": "http", + "url": "http://10.71.1.69:9234/mcp", + "fallback": { + "command": "echo", + "args": ["homekit service not reachable"] + } + } + } +} diff --git a/deploy/setup.sh b/deploy/setup.sh new file mode 100755 index 0000000..a3331e1 --- /dev/null +++ b/deploy/setup.sh @@ -0,0 +1,142 @@ +#!/usr/bin/env bash +# +# mcp2cli LXC Provisioning Script +# Sets up a dedicated container to run mcp2cli as a network daemon. +# +# Usage: +# ./setup.sh [path-to-mcp2cli-binary] +# + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +# --- Colors for output --- +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +log() { echo -e "${GREEN}[+]${NC} $*"; } +warn() { echo -e "${YELLOW}[!]${NC} $*"; } +err() { echo -e "${RED}[x]${NC} $*" >&2; } + +# --- 1. Check root --- +if [[ $EUID -ne 0 ]]; then + err "This script must be run as root" + exit 1 +fi + +# --- 2. Create system user/group --- +log "Creating mcp2cli system user and group..." +if ! getent group mcp2cli >/dev/null 2>&1; then + groupadd --system mcp2cli +fi +if ! getent passwd mcp2cli >/dev/null 2>&1; then + useradd --system --gid mcp2cli --home-dir /var/lib/mcp2cli --shell /usr/sbin/nologin mcp2cli +fi + +# --- 3. Install system dependencies --- +log "Installing system dependencies (curl, jq, unzip)..." +apt-get update -qq +apt-get install -y -qq curl jq unzip ca-certificates gnupg + +# --- 4. Install bun --- +if ! command -v bun >/dev/null 2>&1; then + log "Installing bun..." 
+ curl -fsSL https://bun.sh/install | bash + # Make bun available system-wide + if [[ -f /root/.bun/bin/bun ]]; then + ln -sf /root/.bun/bin/bun /usr/local/bin/bun + ln -sf /root/.bun/bin/bunx /usr/local/bin/bunx + fi +else + log "bun already installed: $(bun --version)" +fi + +# --- 5. Install node/npm --- +if ! command -v node >/dev/null 2>&1; then + log "Installing Node.js via nodesource..." + curl -fsSL https://deb.nodesource.com/setup_lts.x | bash - + apt-get install -y -qq nodejs +else + log "Node.js already installed: $(node --version)" +fi + +# --- 6. Install uv (for uvx) --- +if ! command -v uv >/dev/null 2>&1; then + log "Installing uv..." + curl -LsSf https://astral.sh/uv/install.sh | sh + # Make uv/uvx available system-wide + if [[ -f /root/.local/bin/uv ]]; then + ln -sf /root/.local/bin/uv /usr/local/bin/uv + ln -sf /root/.local/bin/uvx /usr/local/bin/uvx + fi +else + log "uv already installed: $(uv --version)" +fi + +# --- 7. Create directories --- +log "Creating directories..." +mkdir -p /etc/mcp2cli +mkdir -p /var/lib/mcp2cli/cache +mkdir -p /var/log/mcp2cli +chown -R mcp2cli:mcp2cli /var/lib/mcp2cli /var/log/mcp2cli + +# --- 8. Install binary --- +if [[ $# -ge 1 && -f "$1" ]]; then + log "Installing mcp2cli binary from $1..." + cp "$1" /usr/local/bin/mcp2cli + chmod +x /usr/local/bin/mcp2cli +else + warn "No binary provided. Build and copy manually:" + warn " bun build src/cli.ts --compile --outfile mcp2cli" + warn " cp mcp2cli /usr/local/bin/mcp2cli" +fi + +# --- 9. Copy services config --- +log "Installing services config..." +cp "${SCRIPT_DIR}/services-server.json" /etc/mcp2cli/services.json +chown mcp2cli:mcp2cli /etc/mcp2cli/services.json +chmod 640 /etc/mcp2cli/services.json + +# --- 10. Copy env template (don't overwrite existing) --- +if [[ ! -f /etc/mcp2cli/env ]]; then + log "Installing env template..." 
+ cp "${SCRIPT_DIR}/env.example" /etc/mcp2cli/env + chown mcp2cli:mcp2cli /etc/mcp2cli/env + chmod 600 /etc/mcp2cli/env +else + warn "/etc/mcp2cli/env already exists -- not overwriting" +fi + +# --- 11. Install systemd service --- +log "Installing systemd service..." +cp "${SCRIPT_DIR}/mcp2cli.service" /etc/systemd/system/mcp2cli.service +systemctl daemon-reload +systemctl enable mcp2cli.service + +# --- 12. Next steps --- +echo "" +log "Setup complete. Next steps:" +echo "" +echo " 1. Edit /etc/mcp2cli/env" +echo " - Set MCP2CLI_AUTH_TOKEN (use: openssl rand -hex 32)" +echo " - Store the token in vaultwarden for client config" +echo "" +echo " 2. Edit /etc/mcp2cli/services.json" +echo " - Update IPs/ports for your environment" +echo " - Set any required API keys via env var references" +echo "" +if [[ ! -f /usr/local/bin/mcp2cli ]]; then + echo " 3. Install the mcp2cli binary:" + echo " bun build src/cli.ts --compile --outfile mcp2cli" + echo " scp mcp2cli root@<server-ip>:/usr/local/bin/" + echo "" + echo " 4. Start the service:" +else + echo " 3. 
Start the service:" +fi +echo " systemctl start mcp2cli" +echo " journalctl -u mcp2cli -f" +echo "" diff --git a/examples/services-server.json b/examples/services-server.json new file mode 100644 index 0000000..f977608 --- /dev/null +++ b/examples/services-server.json @@ -0,0 +1,30 @@ +{ + "_doc": "Server-side MCP service configuration for network deployment", + "services": { + "n8n": { + "_comment": "n8n workflow automation via stdio", + "backend": "stdio", + "command": "npx", + "args": ["-y", "@n8n/n8n-mcp"], + "env": { "N8N_HOST": "http://your-n8n:5678", "N8N_API_KEY": "${N8N_API_KEY}" } + }, + "vaultwarden-secrets": { + "_comment": "Vaultwarden via HTTP", + "backend": "http", + "url": "http://your-vaultwarden:3001/mcp" + }, + "notebooklm-mcp": { + "_comment": "NotebookLM via stdio (uvx)", + "backend": "stdio", + "command": "uvx", + "args": ["notebooklm-mcp"], + "env": {} + }, + "homekit": { + "_comment": "HomeKit bridge -- fallback if unreachable", + "backend": "http", + "url": "http://your-mac:9234/mcp", + "fallback": { "command": "echo", "args": ["homekit not reachable"] } + } + } +} diff --git a/scripts/mcp2cli-remote b/scripts/mcp2cli-remote new file mode 100755 index 0000000..67a4df5 --- /dev/null +++ b/scripts/mcp2cli-remote @@ -0,0 +1,79 @@ +#!/usr/bin/env bash +# mcp2cli-remote -- thin curl/jq wrapper for remote mcp2cli daemons. +# Use on machines without the binary installed. 
+set -euo pipefail + +if [[ -z "${MCP2CLI_REMOTE_URL:-}" ]]; then + echo "error: MCP2CLI_REMOTE_URL is not set" >&2 + exit 1 +fi + +BASE_URL="${MCP2CLI_REMOTE_URL%/}" +AUTH_HEADER="" +if [[ -n "${MCP2CLI_AUTH_TOKEN:-}" ]]; then + AUTH_HEADER="Authorization: Bearer ${MCP2CLI_AUTH_TOKEN}" +fi + +curl_opts=(-s -f --max-time 30) +if [[ -n "$AUTH_HEADER" ]]; then + curl_opts+=(-H "$AUTH_HEADER") +fi + +case "${1:-}" in + --help|-h|help) + cat <<'USAGE' +Usage: mcp2cli-remote <service> <tool> --params '{...}' + mcp2cli-remote health + +Environment: + MCP2CLI_REMOTE_URL Remote daemon URL (required) + MCP2CLI_AUTH_TOKEN Bearer token for auth (optional) + +Commands: + health Check remote daemon health + <service> <tool> Invoke an MCP tool on the remote daemon + --params '{}' JSON parameters for the tool call +USAGE + exit 0 + ;; + health) + curl "${curl_opts[@]}" -H "Content-Type: application/json" \ + "${BASE_URL}/health" | jq . + exit $? + ;; + "") + echo "error: no command specified. Use --help for usage." >&2 + exit 1 + ;; +esac + +SERVICE="$1" +TOOL="${2:-}" +if [[ -z "$TOOL" ]]; then + echo "error: tool name required. Usage: mcp2cli-remote <service> <tool> --params '{...}'" >&2 + exit 1 +fi +shift 2 + +PARAMS="{}" +while [[ $# -gt 0 ]]; do + case "$1" in + --params) + PARAMS="${2:-"{}"}" + shift 2 + ;; + *) + echo "error: unknown flag: $1" >&2 + exit 1 + ;; + esac +done + +BODY=$(jq -n --arg s "$SERVICE" --arg t "$TOOL" --argjson p "$PARAMS" \ + '{service: $s, tool: $t, params: $p}') + +curl "${curl_opts[@]}" \ + -H "Content-Type: application/json" \ + -X POST \ + -d "$BODY" \ + "${BASE_URL}/call" | jq . diff --git a/src/cli/commands/daemon.ts index 6da8f0b..e2a1c6f 100644 --- a/src/cli/commands/daemon.ts +++ b/src/cli/commands/daemon.ts @@ -2,8 +2,12 @@ * Daemon management commands: stop and status. * Provides CLI control over the background daemon process. 
*/ -import { getDaemonPaths } from "../../daemon/paths.ts"; -import { getDaemonStatus, cleanStaleDaemon } from "../../process/index.ts"; +import { getDaemonPaths, getRemoteConfig } from "../../daemon/paths.ts"; +import { + getDaemonStatus, + cleanStaleDaemon, + checkRemoteHealth, +} from "../../process/index.ts"; /** * Handle `mcp2cli daemon stop` -- stop the running daemon. @@ -41,9 +45,22 @@ export async function handleDaemonStop(_args: string[]): Promise { /** * Handle `mcp2cli daemon status` -- report daemon status. - * Checks PID file, process liveness, and health endpoint. + * Checks remote daemon if MCP2CLI_REMOTE_URL is set, + * otherwise checks PID file, process liveness, and health endpoint. */ export async function handleDaemonStatus(_args: string[]): Promise { + // Remote mode -- check remote daemon health and return early + const remote = getRemoteConfig(); + if (remote) { + const result = await checkRemoteHealth(remote.url, remote.token); + if (result.status === "ok") { + console.log(JSON.stringify({ status: "remote", url: remote.url, ...result.data as object })); + } else { + console.log(JSON.stringify({ status: "unreachable", url: remote.url })); + } + return; + } + const paths = getDaemonPaths(); const status = await getDaemonStatus(paths); diff --git a/src/daemon/auth.ts b/src/daemon/auth.ts new file mode 100644 index 0000000..273e76e --- /dev/null +++ b/src/daemon/auth.ts @@ -0,0 +1,78 @@ +/** + * Bearer token authentication for TCP daemon mode. + * Uses timing-safe comparison to prevent timing attacks. + * /health is exempt from auth for load balancer probes. + */ +import { createLogger } from "../logger/index.ts"; + +const log = createLogger("auth"); + +/** Paths that skip authentication (health checks, metrics scraping) */ +const AUTH_EXEMPT_PATHS = new Set(["/health", "/metrics"]); + +/** + * Load the auth token from MCP2CLI_AUTH_TOKEN env var. + * Returns undefined if not set (auth disabled). 
+ */ +export function loadAuthToken(): string | undefined { + return process.env.MCP2CLI_AUTH_TOKEN || undefined; +} + +/** + * Check if a request path is exempt from authentication. + */ +export function isAuthExempt(path: string): boolean { + return AUTH_EXEMPT_PATHS.has(path); +} + +/** + * Validate bearer token from request Authorization header. + * Returns true if auth is disabled (no token configured) or token matches. + * Uses timing-safe comparison to prevent timing attacks. + */ +export function checkAuth(req: Request, expectedToken: string | undefined): boolean { + // No token configured -- auth disabled + if (!expectedToken) return true; + + const authHeader = req.headers.get("authorization"); + if (!authHeader) { + log.warn("auth_missing", { path: new URL(req.url).pathname }); + return false; + } + + // Extract bearer token + const match = authHeader.match(/^Bearer\s+(.+)$/i); + if (!match) { + log.warn("auth_malformed", { path: new URL(req.url).pathname }); + return false; + } + + const provided = match[1]!; + if (!timingSafeEqual(provided, expectedToken)) { + log.warn("auth_invalid", { path: new URL(req.url).pathname }); + return false; + } + + return true; +} + +/** + * Timing-safe string comparison. + * Compares all bytes regardless of where a mismatch occurs. + */ +function timingSafeEqual(a: string, b: string): boolean { + const encoder = new TextEncoder(); + const bufA = encoder.encode(a); + const bufB = encoder.encode(b); + + // Length difference leaks information, but we still compare all bytes + // of the longer string to maintain constant time + const len = Math.max(bufA.length, bufB.length); + let mismatch = bufA.length !== bufB.length ? 1 : 0; + + for (let i = 0; i < len; i++) { + mismatch |= (bufA[i] ?? 0) ^ (bufB[i] ?? 
0); + } + + return mismatch === 0; +} diff --git a/src/daemon/idle.ts b/src/daemon/idle.ts index 6412fab..fd37d40 100644 --- a/src/daemon/idle.ts +++ b/src/daemon/idle.ts @@ -15,6 +15,9 @@ export class IdleTimer { /** Reset the idle countdown. Starts a new timer from now. */ touch(): void { + // timeoutMs === 0 means idle timer is disabled (e.g., TCP mode) + if (this.timeoutMs <= 0) return; + if (this.timer !== null) { clearTimeout(this.timer); } diff --git a/src/daemon/index.ts b/src/daemon/index.ts index ca51464..3cff0bb 100644 --- a/src/daemon/index.ts +++ b/src/daemon/index.ts @@ -2,15 +2,18 @@ * Daemon entry point. * Starts the long-running daemon process with PID file, idle timer, * connection pool, and signal handlers for graceful shutdown. + * Supports both Unix socket (local) and TCP (network) listen modes. */ import { mkdir, unlink } from "node:fs/promises"; import { dirname } from "node:path"; -import { getDaemonPaths } from "./paths.ts"; +import { getDaemonPaths, getDaemonListenConfig } from "./paths.ts"; import { ConnectionPool } from "./pool.ts"; import { IdleTimer } from "./idle.ts"; import { createDaemonServer } from "./server.ts"; import { loadConfig } from "../config/index.ts"; import { createLogger } from "../logger/index.ts"; +import { loadAuthToken } from "./auth.ts"; +import { MetricsCollector } from "./metrics.ts"; const log = createLogger("daemon"); @@ -19,34 +22,60 @@ const DEFAULT_IDLE_TIMEOUT_S = 60; /** * Start the daemon process. * Creates PID file, loads config, starts connection pool and HTTP server. + * In TCP mode: disables idle timer, skips PID/socket file management. 
*/ export async function startDaemon(): Promise { - const paths = getDaemonPaths(); - - // Ensure runtime directory exists - await mkdir(dirname(paths.pidFile), { recursive: true }); - await mkdir(dirname(paths.socketPath), { recursive: true }); - - // Clean up stale socket/pid from previous crash - await unlink(paths.socketPath).catch(() => {}); - await unlink(paths.pidFile).catch(() => {}); - - // Write PID file - await Bun.write(paths.pidFile, String(process.pid) + "\n"); - log.info("daemon starting", { pid: process.pid, socket: paths.socketPath }); + const listenConfig = getDaemonListenConfig(); + const isTcp = listenConfig.mode === "tcp"; + + // Unix mode: manage PID and socket files + let paths: ReturnType | null = null; + if (!isTcp) { + paths = getDaemonPaths(); + + // Ensure runtime directory exists + await mkdir(dirname(paths.pidFile), { recursive: true }); + await mkdir(dirname(paths.socketPath), { recursive: true }); + + // Clean up stale socket/pid from previous crash + await unlink(paths.socketPath).catch(() => {}); + await unlink(paths.pidFile).catch(() => {}); + + // Write PID file + await Bun.write(paths.pidFile, String(process.pid) + "\n"); + log.info("daemon starting", { pid: process.pid, socket: paths.socketPath }); + } else { + log.info("daemon starting", { + pid: process.pid, + mode: "tcp", + host: listenConfig.hostname, + port: listenConfig.port, + }); + } // Load service configuration const config = await loadConfig(); - // Create connection pool + // Load auth token + const authToken = loadAuthToken(); + if (isTcp && !authToken) { + log.warn("no_auth_token", { + message: "TCP mode without MCP2CLI_AUTH_TOKEN -- daemon is unauthenticated", + }); + } + + // Create connection pool and metrics collector const pool = new ConnectionPool(); + const metrics = new MetricsCollector(); // Parse idle timeout from env (seconds -> ms) + // TCP mode: default to 0 (disabled) since it's a long-running network service + const defaultTimeout = isTcp ? 
0 : DEFAULT_IDLE_TIMEOUT_S; const idleTimeoutS = parseInt( - process.env.MCP2CLI_IDLE_TIMEOUT ?? String(DEFAULT_IDLE_TIMEOUT_S), + process.env.MCP2CLI_IDLE_TIMEOUT ?? String(defaultTimeout), 10, ); - const idleTimeoutMs = (Number.isNaN(idleTimeoutS) ? DEFAULT_IDLE_TIMEOUT_S : idleTimeoutS) * 1000; + const idleTimeoutMs = (Number.isNaN(idleTimeoutS) ? defaultTimeout : idleTimeoutS) * 1000; // Graceful shutdown function let isShuttingDown = false; @@ -67,9 +96,11 @@ export async function startDaemon(): Promise { // Close all MCP connections (reuses McpTransport.close() multi-step shutdown) await pool.closeAll(); - // Remove socket and PID files - await unlink(paths.socketPath).catch(() => {}); - await unlink(paths.pidFile).catch(() => {}); + // Unix mode: remove socket and PID files + if (paths) { + await unlink(paths.socketPath).catch(() => {}); + await unlink(paths.pidFile).catch(() => {}); + } clearTimeout(forceTimer); process.exit(0); @@ -79,20 +110,22 @@ export async function startDaemon(): Promise { } }; - // Create idle timer + // Create idle timer (disabled when timeoutMs is 0) const idleTimer = new IdleTimer(idleTimeoutMs, () => { void gracefulShutdown(); }); // Create and start server const server = createDaemonServer({ - socketPath: paths.socketPath, + listenConfig, pool, config, idleTimer, onShutdown: () => { void gracefulShutdown(); }, + authToken, + metrics, }); // Install signal handlers @@ -100,6 +133,8 @@ export async function startDaemon(): Promise { process.on("SIGINT", () => void gracefulShutdown()); process.on("SIGHUP", () => void gracefulShutdown()); - // Start first idle countdown - idleTimer.touch(); + // Start first idle countdown (only if idle timer is enabled) + if (idleTimeoutMs > 0) { + idleTimer.touch(); + } } diff --git a/src/daemon/metrics.ts b/src/daemon/metrics.ts new file mode 100644 index 0000000..6e9bdf0 --- /dev/null +++ b/src/daemon/metrics.ts @@ -0,0 +1,203 @@ +/** + * Prometheus metrics collector for mcp2cli daemon. 
+ * Exposes request metrics, connection pool state, and system health + * in Prometheus text exposition format at GET /metrics. + */ + +/** Histogram bucket boundaries for request duration (ms) */ +const DURATION_BUCKETS = [10, 25, 50, 100, 250, 500, 1000, 2500, 5000, 10000, 30000]; + +interface RequestMetric { + count: number; + errorCount: number; + totalDurationMs: number; + /** Histogram buckets: count of requests <= bucket boundary */ + buckets: number[]; +} + +interface ConnectionEvent { + connects: number; + disconnects: number; + healthCheckFailures: number; +} + +/** + * Centralized metrics collector. + * Thread-safe for single-threaded Bun runtime. + */ +export class MetricsCollector { + private readonly requests = new Map(); + private readonly connections = new Map(); + private activeRequests = 0; + private peakActiveRequests = 0; + private totalRequests = 0; + private readonly startTime = Date.now(); + + /** Record start of a request (increments active count) */ + onRequestStart(): void { + this.activeRequests++; + if (this.activeRequests > this.peakActiveRequests) { + this.peakActiveRequests = this.activeRequests; + } + } + + /** Record end of a request with service/tool/status/duration */ + onRequestEnd( + service: string, + tool: string, + success: boolean, + durationMs: number, + ): void { + this.activeRequests = Math.max(0, this.activeRequests - 1); + this.totalRequests++; + + const key = `${service}.${tool}`; + let metric = this.requests.get(key); + if (!metric) { + metric = { + count: 0, + errorCount: 0, + totalDurationMs: 0, + buckets: new Array(DURATION_BUCKETS.length).fill(0) as number[], + }; + this.requests.set(key, metric); + } + + metric.count++; + if (!success) metric.errorCount++; + metric.totalDurationMs += durationMs; + + // Update histogram buckets + for (let i = 0; i < DURATION_BUCKETS.length; i++) { + if (durationMs <= DURATION_BUCKETS[i]!) 
{ + metric.buckets[i]!++; + } + } + } + + /** Record a connection event */ + onConnect(service: string): void { + this.getOrCreateConnection(service).connects++; + } + + onDisconnect(service: string): void { + this.getOrCreateConnection(service).disconnects++; + } + + onHealthCheckFailure(service: string): void { + this.getOrCreateConnection(service).healthCheckFailures++; + } + + /** Record an auth failure */ + private authFailures = 0; + onAuthFailure(): void { + this.authFailures++; + } + + /** + * Render all metrics in Prometheus text exposition format. + * @param poolSize Current connection pool size + * @param poolServices List of connected service names + */ + render(poolSize: number, poolServices: string[]): string { + const lines: string[] = []; + + // -- Process metrics -- + const mem = process.memoryUsage(); + lines.push("# HELP mcp2cli_process_uptime_seconds Daemon uptime in seconds"); + lines.push("# TYPE mcp2cli_process_uptime_seconds gauge"); + lines.push(`mcp2cli_process_uptime_seconds ${((Date.now() - this.startTime) / 1000).toFixed(1)}`); + + lines.push("# HELP mcp2cli_process_memory_rss_bytes Resident set size in bytes"); + lines.push("# TYPE mcp2cli_process_memory_rss_bytes gauge"); + lines.push(`mcp2cli_process_memory_rss_bytes ${mem.rss}`); + + lines.push("# HELP mcp2cli_process_memory_heap_used_bytes V8 heap used in bytes"); + lines.push("# TYPE mcp2cli_process_memory_heap_used_bytes gauge"); + lines.push(`mcp2cli_process_memory_heap_used_bytes ${mem.heapUsed}`); + + lines.push("# HELP mcp2cli_process_memory_heap_total_bytes V8 heap total in bytes"); + lines.push("# TYPE mcp2cli_process_memory_heap_total_bytes gauge"); + lines.push(`mcp2cli_process_memory_heap_total_bytes ${mem.heapTotal}`); + + // -- Connection pool metrics -- + lines.push("# HELP mcp2cli_pool_connections_active Current active connections"); + lines.push("# TYPE mcp2cli_pool_connections_active gauge"); + lines.push(`mcp2cli_pool_connections_active ${poolSize}`); + + 
 lines.push("# HELP mcp2cli_pool_services Connected service names"); + lines.push("# TYPE mcp2cli_pool_services gauge"); + for (const svc of poolServices) { + lines.push(`mcp2cli_pool_services{service="${svc}"} 1`); + } + + // -- Connection lifecycle metrics -- + lines.push("# HELP mcp2cli_connection_events_total Connection lifecycle events"); + lines.push("# TYPE mcp2cli_connection_events_total counter"); + for (const [service, events] of this.connections) { + lines.push(`mcp2cli_connection_events_total{service="${service}",event="connect"} ${events.connects}`); + lines.push(`mcp2cli_connection_events_total{service="${service}",event="disconnect"} ${events.disconnects}`); + lines.push(`mcp2cli_connection_events_total{service="${service}",event="health_check_failure"} ${events.healthCheckFailures}`); + } + + // -- Request metrics -- + lines.push("# HELP mcp2cli_requests_active Currently in-flight requests"); + lines.push("# TYPE mcp2cli_requests_active gauge"); + lines.push(`mcp2cli_requests_active ${this.activeRequests}`); + + lines.push("# HELP mcp2cli_requests_active_peak Peak concurrent requests since start"); + lines.push("# TYPE mcp2cli_requests_active_peak gauge"); + lines.push(`mcp2cli_requests_active_peak ${this.peakActiveRequests}`); + + lines.push("# HELP mcp2cli_requests_total Total requests by service and tool"); + lines.push("# TYPE mcp2cli_requests_total counter"); + lines.push("# HELP mcp2cli_requests_errors_total Total failed requests"); + lines.push("# TYPE mcp2cli_requests_errors_total counter"); + lines.push("# HELP mcp2cli_requests_duration_ms_sum Total request duration in ms"); + lines.push("# TYPE mcp2cli_requests_duration_ms_sum counter"); + + for (const [key, metric] of this.requests) { + const [service, tool] = [key.slice(0, key.indexOf(".")), key.slice(key.indexOf(".") + 1)]; + const labels = `service="${service}",tool="${tool}"`; + lines.push(`mcp2cli_requests_total{${labels}} ${metric.count}`); + lines.push(`mcp2cli_requests_errors_total{${labels}} ${metric.errorCount}`); + 
lines.push(`mcp2cli_requests_duration_ms_sum{${labels}} ${metric.totalDurationMs.toFixed(0)}`); + } + + // -- Request duration histogram -- + lines.push("# HELP mcp2cli_request_duration_ms Request duration histogram"); + lines.push("# TYPE mcp2cli_request_duration_ms histogram"); + for (const [key, metric] of this.requests) { + const [service, tool] = key.split(".", 2); + const labels = `service="${service}",tool="${tool}"`; + for (let i = 0; i < DURATION_BUCKETS.length; i++) { + lines.push(`mcp2cli_request_duration_ms_bucket{${labels},le="${DURATION_BUCKETS[i]}"} ${metric.buckets[i]}`); + } + lines.push(`mcp2cli_request_duration_ms_bucket{${labels},le="+Inf"} ${metric.count}`); + lines.push(`mcp2cli_request_duration_ms_count{${labels}} ${metric.count}`); + lines.push(`mcp2cli_request_duration_ms_sum{${labels}} ${metric.totalDurationMs.toFixed(0)}`); + } + + // -- Auth metrics -- + lines.push("# HELP mcp2cli_auth_failures_total Total auth failures"); + lines.push("# TYPE mcp2cli_auth_failures_total counter"); + lines.push(`mcp2cli_auth_failures_total ${this.authFailures}`); + + // -- Info metric -- + lines.push("# HELP mcp2cli_info Build info"); + lines.push("# TYPE mcp2cli_info gauge"); + lines.push(`mcp2cli_info{version="0.2.0"} 1`); + + lines.push(""); + return lines.join("\n"); + } + + private getOrCreateConnection(service: string): ConnectionEvent { + let entry = this.connections.get(service); + if (!entry) { + entry = { connects: 0, disconnects: 0, healthCheckFailures: 0 }; + this.connections.set(service, entry); + } + return entry; + } +} diff --git a/src/daemon/paths.ts b/src/daemon/paths.ts index 22f4981..820f2ee 100644 --- a/src/daemon/paths.ts +++ b/src/daemon/paths.ts @@ -1,9 +1,10 @@ /** * Runtime path resolution for daemon PID file and Unix socket. * Supports XDG_RUNTIME_DIR, macOS fallback, and env var overrides. + * Also resolves TCP listen config and remote connection config. 
*/ import { join } from "node:path"; -import type { DaemonPaths } from "./types.ts"; +import type { DaemonPaths, DaemonListenConfig } from "./types.ts"; const APP_DIR = "mcp2cli"; const PID_FILENAME = "daemon.pid"; @@ -18,6 +19,36 @@ const SOCKET_FILENAME = "daemon.sock"; * 3. XDG_RUNTIME_DIR/mcp2cli/ * 4. ~/.config/mcp2cli/run/ (macOS/fallback) */ +const DEFAULT_TCP_PORT = 9500; + +/** + * Determine how the daemon should listen based on env vars. + * - MCP2CLI_LISTEN_HOST set: TCP mode (network-accessible) + * - Otherwise: Unix socket mode (local only) + */ +export function getDaemonListenConfig(): DaemonListenConfig { + const host = process.env.MCP2CLI_LISTEN_HOST; + if (host) { + const port = parseInt(process.env.MCP2CLI_LISTEN_PORT ?? String(DEFAULT_TCP_PORT), 10); + return { + mode: "tcp", + hostname: host, + port: Number.isNaN(port) ? DEFAULT_TCP_PORT : port, + }; + } + return { mode: "unix", socketPath: getDaemonPaths().socketPath }; +} + +/** + * Get remote daemon connection config from env vars. + * Returns null if MCP2CLI_REMOTE_URL is not set. + */ +export function getRemoteConfig(): { url: string; token: string | undefined } | null { + const url = process.env.MCP2CLI_REMOTE_URL; + if (!url) return null; + return { url, token: process.env.MCP2CLI_AUTH_TOKEN }; +} + export function getDaemonPaths(overrides?: { runtimeDir?: string; }): DaemonPaths { diff --git a/src/daemon/server.ts b/src/daemon/server.ts index 4cc09c7..3951ecb 100644 --- a/src/daemon/server.ts +++ b/src/daemon/server.ts @@ -1,6 +1,7 @@ /** - * Daemon HTTP server over Unix domain socket. - * Handles /call, /list-tools, /schema, /health, /shutdown endpoints. + * Daemon HTTP server over Unix domain socket or TCP. + * Handles /call, /list-tools, /schema, /health, /metrics, /shutdown endpoints. + * Supports bearer token auth for TCP mode. 
*/ // Server type inferred from Bun.serve() return import type { ServicesConfig } from "../config/index.ts"; @@ -12,6 +13,7 @@ import type { DaemonSchemaRequest, DaemonCallResponse, DaemonErrorResponse, + DaemonListenConfig, } from "./types.ts"; import { formatToolResult } from "../invocation/format.ts"; import { listToolsForService, getToolSchema } from "../schema/introspect.ts"; @@ -19,16 +21,20 @@ import { ConnectionError } from "../connection/errors.ts"; import { ToolError } from "../invocation/errors.ts"; import type { ErrorCode } from "../types/index.ts"; import { createLogger } from "../logger/index.ts"; +import { checkAuth, isAuthExempt } from "./auth.ts"; +import type { MetricsCollector } from "./metrics.ts"; const log = createLogger("server"); const reqLog = createLogger("daemon:request"); interface DaemonServerOptions { - socketPath: string; + listenConfig: DaemonListenConfig; pool: ConnectionPool; config: ServicesConfig; idleTimer: IdleTimer; onShutdown: () => void; + authToken: string | undefined; + metrics: MetricsCollector; } function errorResponse( @@ -45,26 +51,39 @@ function errorResponse( } /** - * Create the daemon HTTP server bound to a Unix socket. + * Create the daemon HTTP server bound to a Unix socket or TCP port. * Returns the Bun.serve() server instance. */ export function createDaemonServer(opts: DaemonServerOptions) { - const { socketPath, pool, config, idleTimer, onShutdown } = opts; + const { listenConfig, pool, config, idleTimer, onShutdown, authToken, metrics } = opts; + + // Build listen options based on mode + const listenOpts = listenConfig.mode === "unix" + ? 
{ unix: listenConfig.socketPath } + : { hostname: listenConfig.hostname, port: listenConfig.port }; return Bun.serve({ - unix: socketPath, + ...listenOpts, async fetch(req: Request): Promise { const url = new URL(req.url); const path = url.pathname; log.debug("request received", { method: req.method, path }); + // Auth check (exempt paths skip this) + if (!isAuthExempt(path) && !checkAuth(req, authToken)) { + metrics.onAuthFailure(); + return errorResponse("AUTH_ERROR", "Unauthorized", undefined, 401); + } + // POST /call -- invoke a tool if (path === "/call" && req.method === "POST") { idleTimer.onRequestStart(); + metrics.onRequestStart(); const startTime = performance.now(); let callService = "unknown"; let callTool = "unknown"; + let success = false; try { const body = (await req.json()) as DaemonCallRequest; callService = body.service; @@ -98,6 +117,7 @@ export function createDaemonServer(opts: DaemonServerOptions) { const duration = Math.round(performance.now() - startTime); reqLog.info("tool_call", { service: callService, tool: callTool, duration, result: "success" }); + success = true; const formatted = formatToolResult( sdkResult as Parameters[0], ); @@ -116,6 +136,8 @@ export function createDaemonServer(opts: DaemonServerOptions) { } return handleEndpointError(err, pool); } finally { + const duration = Math.round(performance.now() - startTime); + metrics.onRequestEnd(callService, callTool, success, duration); idleTimer.onRequestEnd(); } } @@ -162,7 +184,7 @@ export function createDaemonServer(opts: DaemonServerOptions) { } } - // GET /health -- health check + // GET /health -- health check (auth-exempt) if (path === "/health" && req.method === "GET") { const mem = process.memoryUsage(); return Response.json({ @@ -178,6 +200,14 @@ export function createDaemonServer(opts: DaemonServerOptions) { }); } + // GET /metrics -- Prometheus metrics (auth-exempt) + if (path === "/metrics" && req.method === "GET") { + const body = metrics.render(pool.size, 
pool.serviceNames); + return new Response(body, { + headers: { "Content-Type": "text/plain; version=0.0.4; charset=utf-8" }, + }); + } + // POST /shutdown -- graceful shutdown if (path === "/shutdown" && req.method === "POST") { // Return response FIRST, then schedule shutdown diff --git a/src/daemon/types.ts b/src/daemon/types.ts index b5096fa..9a5e1bd 100644 --- a/src/daemon/types.ts +++ b/src/daemon/types.ts @@ -46,3 +46,19 @@ export interface DaemonErrorResponse { /** Discriminated union of all daemon responses */ export type DaemonResponse = DaemonCallResponse | DaemonErrorResponse; + +/** Unix domain socket listen config */ +export interface UnixListenConfig { + mode: "unix"; + socketPath: string; +} + +/** TCP network listen config */ +export interface TcpListenConfig { + mode: "tcp"; + hostname: string; + port: number; +} + +/** Discriminated union for daemon listen mode */ +export type DaemonListenConfig = UnixListenConfig | TcpListenConfig; diff --git a/src/process/client.ts b/src/process/client.ts index 4649d45..29a60e8 100644 --- a/src/process/client.ts +++ b/src/process/client.ts @@ -1,12 +1,13 @@ /** * CLI-side daemon communication client. * Starts the daemon if needed, sends requests over Unix socket. + * Supports remote daemon connections via MCP2CLI_REMOTE_URL. */ import { mkdir, stat, unlink } from "node:fs/promises"; import { open } from "node:fs/promises"; import { dirname } from "node:path"; import { constants } from "node:fs"; -import { getDaemonPaths } from "../daemon/paths.ts"; +import { getDaemonPaths, getRemoteConfig } from "../daemon/paths.ts"; import { ConnectionError } from "../connection/errors.ts"; import { getDaemonStatus, cleanStaleDaemon } from "./liveness.ts"; import type { DaemonPaths } from "../daemon/types.ts"; @@ -146,63 +147,77 @@ export async function ensureDaemon(paths: DaemonPaths): Promise { } /** - * Send a tool call request to the daemon. + * Shared fetch helper that routes to remote or local daemon. 
+ * - Remote: uses MCP2CLI_REMOTE_URL with Bearer token, skips ensureDaemon() + * - Local: ensures daemon is running, fetches via Unix socket */ -export async function callViaDaemon( - request: DaemonCallRequest, +async function fetchDaemon( + path: string, + body?: unknown, ): Promise { - const paths = getDaemonPaths(); - await ensureDaemon(paths); + const remote = getRemoteConfig(); try { - const response = await fetch("http://localhost/call", { - unix: paths.socketPath, - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify(request), - signal: AbortSignal.timeout(REQUEST_TIMEOUT_MS), - }); + let response: Response; + + if (remote) { + // Remote mode -- direct HTTP to remote daemon + const url = `${remote.url.replace(/\/$/, "")}${path}`; + const headers: Record = { + "Content-Type": "application/json", + }; + if (remote.token) { + headers["Authorization"] = `Bearer ${remote.token}`; + } + response = await fetch(url, { + method: "POST", + headers, + body: body !== undefined ? JSON.stringify(body) : undefined, + signal: AbortSignal.timeout(REQUEST_TIMEOUT_MS), + }); + } else { + // Local mode -- Unix socket + const paths = getDaemonPaths(); + await ensureDaemon(paths); + response = await fetch(`http://localhost${path}`, { + unix: paths.socketPath, + method: "POST", + headers: { "Content-Type": "application/json" }, + body: body !== undefined ? JSON.stringify(body) : undefined, + signal: AbortSignal.timeout(REQUEST_TIMEOUT_MS), + }); + } + return (await response.json()) as DaemonResponse; } catch (err) { const message = err instanceof Error ? err.message : String(err); + const target = remote ? `remote daemon at ${remote.url}` : "daemon"; return { success: false, error: { code: "CONNECTION_ERROR", - message: `Failed to communicate with daemon: ${message}`, + message: `Failed to communicate with ${target}: ${message}`, }, }; } } +/** + * Send a tool call request to the daemon. 
+ */ +export async function callViaDaemon( + request: DaemonCallRequest, +): Promise { + return fetchDaemon("/call", request); +} + /** * Send a list-tools request to the daemon. */ export async function listToolsViaDaemon( request: DaemonListToolsRequest, ): Promise { - const paths = getDaemonPaths(); - await ensureDaemon(paths); - - try { - const response = await fetch("http://localhost/list-tools", { - unix: paths.socketPath, - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify(request), - signal: AbortSignal.timeout(REQUEST_TIMEOUT_MS), - }); - return (await response.json()) as DaemonResponse; - } catch (err) { - const message = err instanceof Error ? err.message : String(err); - return { - success: false, - error: { - code: "CONNECTION_ERROR", - message: `Failed to communicate with daemon: ${message}`, - }, - }; - } + return fetchDaemon("/list-tools", request); } /** @@ -211,26 +226,5 @@ export async function listToolsViaDaemon( export async function getSchemaViaDaemon( request: DaemonSchemaRequest, ): Promise { - const paths = getDaemonPaths(); - await ensureDaemon(paths); - - try { - const response = await fetch("http://localhost/schema", { - unix: paths.socketPath, - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify(request), - signal: AbortSignal.timeout(REQUEST_TIMEOUT_MS), - }); - return (await response.json()) as DaemonResponse; - } catch (err) { - const message = err instanceof Error ? 
err.message : String(err); - return { - success: false, - error: { - code: "CONNECTION_ERROR", - message: `Failed to communicate with daemon: ${message}`, - }, - }; - } + return fetchDaemon("/schema", request); } diff --git a/src/process/index.ts b/src/process/index.ts index 549b3f7..314fcd5 100644 --- a/src/process/index.ts +++ b/src/process/index.ts @@ -7,5 +7,6 @@ export { getDaemonStatus, isDaemonAlive, cleanStaleDaemon, + checkRemoteHealth, } from "./liveness.ts"; export type { DaemonStatus } from "./types.ts"; diff --git a/src/process/liveness.ts b/src/process/liveness.ts index 9905308..0eab33a 100644 --- a/src/process/liveness.ts +++ b/src/process/liveness.ts @@ -71,3 +71,29 @@ export async function cleanStaleDaemon(paths: DaemonPaths): Promise { await unlink(paths.pidFile).catch(() => {}); await unlink(paths.socketPath).catch(() => {}); } + +/** + * Check health of a remote daemon via HTTP. + * Returns status "ok" with response data on success, + * or "unreachable" on any error. 
+ */ +export async function checkRemoteHealth( + url: string, + token: string | undefined, +): Promise<{ status: "ok" | "unreachable"; data?: unknown }> { + try { + const headers: Record = {}; + if (token) { + headers["Authorization"] = `Bearer ${token}`; + } + const response = await fetch(`${url.replace(/\/$/, "")}/health`, { + method: "GET", + headers, + signal: AbortSignal.timeout(5000), + }); + const data = await response.json(); + return { status: "ok", data }; + } catch { + return { status: "unreachable" }; + } +} diff --git a/src/types/index.ts b/src/types/index.ts index 9f828b6..7431450 100644 --- a/src/types/index.ts +++ b/src/types/index.ts @@ -12,6 +12,7 @@ export type ErrorCode = | "TOOL_TIMEOUT" | "UNKNOWN_COMMAND" | "TOOL_BLOCKED" + | "AUTH_ERROR" | "INTERNAL_ERROR"; /** diff --git a/tests/daemon/auth.test.ts b/tests/daemon/auth.test.ts new file mode 100644 index 0000000..811e399 --- /dev/null +++ b/tests/daemon/auth.test.ts @@ -0,0 +1,90 @@ +import { describe, test, expect } from "bun:test"; +import { checkAuth, isAuthExempt, loadAuthToken } from "../../src/daemon/auth.ts"; + +function makeReq(path: string, authHeader?: string): Request { + const headers: Record = {}; + if (authHeader) headers["Authorization"] = authHeader; + return new Request(`http://localhost${path}`, { headers }); +} + +describe("auth", () => { + describe("isAuthExempt", () => { + test("exempts /health", () => { + expect(isAuthExempt("/health")).toBe(true); + }); + + test("exempts /metrics", () => { + expect(isAuthExempt("/metrics")).toBe(true); + }); + + test("does not exempt /call", () => { + expect(isAuthExempt("/call")).toBe(false); + }); + + test("does not exempt /shutdown", () => { + expect(isAuthExempt("/shutdown")).toBe(false); + }); + }); + + describe("checkAuth", () => { + test("returns true when no token configured (auth disabled)", () => { + expect(checkAuth(makeReq("/call"), undefined)).toBe(true); + }); + + test("returns false when token configured but no header", 
() => { + expect(checkAuth(makeReq("/call"), "secret")).toBe(false); + }); + + test("returns false with malformed auth header", () => { + expect(checkAuth(makeReq("/call", "Basic abc123"), "secret")).toBe(false); + }); + + test("returns false with wrong token", () => { + expect(checkAuth(makeReq("/call", "Bearer wrong"), "secret")).toBe(false); + }); + + test("returns true with correct token", () => { + expect(checkAuth(makeReq("/call", "Bearer secret"), "secret")).toBe(true); + }); + + test("case-insensitive Bearer prefix", () => { + expect(checkAuth(makeReq("/call", "bearer secret"), "secret")).toBe(true); + }); + + test("timing-safe: different lengths still compared", () => { + // Should not short-circuit on length mismatch + expect(checkAuth(makeReq("/call", "Bearer ab"), "secret-long-token")).toBe(false); + }); + }); + + describe("loadAuthToken", () => { + test("returns undefined when env not set", () => { + const orig = process.env.MCP2CLI_AUTH_TOKEN; + delete process.env.MCP2CLI_AUTH_TOKEN; + expect(loadAuthToken()).toBeUndefined(); + if (orig !== undefined) process.env.MCP2CLI_AUTH_TOKEN = orig; + }); + + test("returns token when env is set", () => { + const orig = process.env.MCP2CLI_AUTH_TOKEN; + process.env.MCP2CLI_AUTH_TOKEN = "test-token"; + expect(loadAuthToken()).toBe("test-token"); + if (orig !== undefined) { + process.env.MCP2CLI_AUTH_TOKEN = orig; + } else { + delete process.env.MCP2CLI_AUTH_TOKEN; + } + }); + + test("returns undefined for empty string", () => { + const orig = process.env.MCP2CLI_AUTH_TOKEN; + process.env.MCP2CLI_AUTH_TOKEN = ""; + expect(loadAuthToken()).toBeUndefined(); + if (orig !== undefined) { + process.env.MCP2CLI_AUTH_TOKEN = orig; + } else { + delete process.env.MCP2CLI_AUTH_TOKEN; + } + }); + }); +}); diff --git a/tests/daemon/observability.test.ts b/tests/daemon/observability.test.ts index ded9178..33cb216 100644 --- a/tests/daemon/observability.test.ts +++ b/tests/daemon/observability.test.ts @@ -58,6 +58,7 @@ 
mock.module("../../src/connection/index.ts", () => ({ const { ConnectionPool } = await import("../../src/daemon/pool.ts"); const { createDaemonServer } = await import("../../src/daemon/server.ts"); const { IdleTimer } = await import("../../src/daemon/idle.ts"); +const { MetricsCollector } = await import("../../src/daemon/metrics.ts"); const testConfig = { services: { @@ -107,11 +108,13 @@ describe("Daemon Observability", () => { const socketPath = join(tempDir, `test-${Date.now()}.sock`); const idleTimer = new IdleTimer(60000, () => {}); const server = createDaemonServer({ - socketPath, + listenConfig: { mode: "unix", socketPath }, pool, config: testConfig, idleTimer, onShutdown: () => {}, + authToken: undefined, + metrics: new MetricsCollector(), }); servers.push(server); return server; diff --git a/tests/process/client-remote.test.ts b/tests/process/client-remote.test.ts new file mode 100644 index 0000000..e678981 --- /dev/null +++ b/tests/process/client-remote.test.ts @@ -0,0 +1,112 @@ +import { describe, test, expect, beforeEach, afterEach } from "bun:test"; +import { getRemoteConfig } from "../../src/daemon/paths.ts"; +import { checkRemoteHealth } from "../../src/process/liveness.ts"; + +describe("getRemoteConfig", () => { + const originalUrl = process.env.MCP2CLI_REMOTE_URL; + const originalToken = process.env.MCP2CLI_AUTH_TOKEN; + + afterEach(() => { + // Restore original env + if (originalUrl !== undefined) { + process.env.MCP2CLI_REMOTE_URL = originalUrl; + } else { + delete process.env.MCP2CLI_REMOTE_URL; + } + if (originalToken !== undefined) { + process.env.MCP2CLI_AUTH_TOKEN = originalToken; + } else { + delete process.env.MCP2CLI_AUTH_TOKEN; + } + }); + + test("returns null when MCP2CLI_REMOTE_URL is not set", () => { + delete process.env.MCP2CLI_REMOTE_URL; + delete process.env.MCP2CLI_AUTH_TOKEN; + const result = getRemoteConfig(); + expect(result).toBeNull(); + }); + + test("returns config when MCP2CLI_REMOTE_URL is set", () => { + 
process.env.MCP2CLI_REMOTE_URL = "http://10.0.0.5:9500"; + delete process.env.MCP2CLI_AUTH_TOKEN; + const result = getRemoteConfig(); + expect(result).toEqual({ + url: "http://10.0.0.5:9500", + token: undefined, + }); + }); + + test("returns config with token when both env vars set", () => { + process.env.MCP2CLI_REMOTE_URL = "http://10.0.0.5:9500"; + process.env.MCP2CLI_AUTH_TOKEN = "test-secret-token"; + const result = getRemoteConfig(); + expect(result).toEqual({ + url: "http://10.0.0.5:9500", + token: "test-secret-token", + }); + }); +}); + +describe("checkRemoteHealth", () => { + let server: ReturnType; + let baseUrl: string; + + beforeEach(() => { + server = Bun.serve({ + port: 0, // random available port + fetch(req) { + const url = new URL(req.url); + + if (url.pathname === "/health") { + // Check auth if token header present + const auth = req.headers.get("authorization"); + if (auth === "Bearer bad-token") { + return new Response(JSON.stringify({ error: "unauthorized" }), { + status: 401, + }); + } + return Response.json({ + status: "ok", + uptime: 1234, + services: 3, + }); + } + + return new Response("Not Found", { status: 404 }); + }, + }); + baseUrl = `http://localhost:${server.port}`; + }); + + afterEach(() => { + server.stop(true); + }); + + test("returns ok with data for healthy server", async () => { + const result = await checkRemoteHealth(baseUrl, undefined); + expect(result.status).toBe("ok"); + expect(result.data).toEqual({ + status: "ok", + uptime: 1234, + services: 3, + }); + }); + + test("returns ok with token auth", async () => { + const result = await checkRemoteHealth(baseUrl, "valid-token"); + expect(result.status).toBe("ok"); + expect(result.data).toBeDefined(); + }); + + test("returns unreachable for non-existent server", async () => { + const result = await checkRemoteHealth("http://127.0.0.1:1", undefined); + expect(result.status).toBe("unreachable"); + expect(result.data).toBeUndefined(); + }); + + test("handles trailing slash in 
URL", async () => { + const result = await checkRemoteHealth(`${baseUrl}/`, undefined); + expect(result.status).toBe("ok"); + }); +}); From 0612ac38eae86d7341b085c094dc855e596170a9 Mon Sep 17 00:00:00 2001 From: Rodaddy Date: Tue, 10 Mar 2026 00:04:51 -0400 Subject: [PATCH 2/3] feat: web management UI with RBAC auth and CI/CD deploy pipeline Phase 5 implementation: - ConfigManager with CRUD operations and disk persistence - REST API for service management (add/update/remove/import/reload) - Embedded web UI with dark theme, login (username+password and token tabs) - Pluggable AuthProvider interface with TokenAuthProvider (RBAC) - Three roles: admin (full), agent (tools+read), viewer (read-only) - CI/CD deploy pipeline: build, deploy to CT 216, health check, auto-rollback - Cross-compile for Linux x64 in deploy job - tokens.json gitignored (secrets) Co-Authored-By: Claude Opus 4.6 --- .github/workflows/ci.yml | 92 ++++ .gitignore | 3 + src/daemon/auth-provider.ts | 256 ++++++++++++ src/daemon/auth.ts | 87 +++- src/daemon/config-manager.ts | 250 +++++++++++ src/daemon/index.ts | 24 +- src/daemon/server.ts | 217 +++++++++- src/daemon/ui.ts | 626 ++++++++++++++++++++++++++++ tests/daemon/api.test.ts | 145 +++++++ tests/daemon/auth-provider.test.ts | 331 +++++++++++++++ tests/daemon/config-manager.test.ts | 171 ++++++++ tests/daemon/observability.test.ts | 3 +- tests/daemon/timeout.test.ts | 2 + tests/integration/daemon.test.ts | 2 + 14 files changed, 2174 insertions(+), 35 deletions(-) create mode 100644 src/daemon/auth-provider.ts create mode 100644 src/daemon/config-manager.ts create mode 100644 src/daemon/ui.ts create mode 100644 tests/daemon/api.test.ts create mode 100644 tests/daemon/auth-provider.test.ts create mode 100644 tests/daemon/config-manager.test.ts diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ec5fd77..24e5d96 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,3 +30,95 @@ jobs: - name: Build run: bun 
build --compile src/cli/index.ts --outfile dist/mcp2cli + + - name: Upload binary + uses: actions/upload-artifact@v4 + with: + name: mcp2cli-binary + path: dist/mcp2cli + retention-days: 7 + + deploy: + needs: check + runs-on: self-hosted + if: github.ref == 'refs/heads/main' && github.event_name == 'push' + env: + DEPLOY_HOST: 10.71.20.63 + DEPLOY_USER: root + SERVICE_NAME: mcp2cli + BINARY_PATH: /usr/local/bin/mcp2cli + HEALTH_URL: http://10.71.20.63:9500/health + + steps: + - uses: actions/checkout@v4 + + - uses: oven-sh/setup-bun@v2 + with: + bun-version: ${{ env.BUN_VERSION }} + + - name: Install and build for Linux x64 + run: | + bun install --frozen-lockfile + bun build --compile --target=bun-linux-x64 src/cli/index.ts --outfile dist/mcp2cli + + - name: Backup current binary + run: | + ssh ${{ env.DEPLOY_USER }}@${{ env.DEPLOY_HOST }} \ + "cp ${{ env.BINARY_PATH }} ${{ env.BINARY_PATH }}.bak 2>/dev/null || true" + + - name: Deploy new binary + run: | + scp dist/mcp2cli ${{ env.DEPLOY_USER }}@${{ env.DEPLOY_HOST }}:/tmp/mcp2cli-new + ssh ${{ env.DEPLOY_USER }}@${{ env.DEPLOY_HOST }} \ + "mv /tmp/mcp2cli-new ${{ env.BINARY_PATH }} && \ + chmod +x ${{ env.BINARY_PATH }} && \ + chown mcp2cli:mcp2cli ${{ env.BINARY_PATH }}" + + - name: Restart service + run: | + ssh ${{ env.DEPLOY_USER }}@${{ env.DEPLOY_HOST }} \ + "systemctl restart ${{ env.SERVICE_NAME }}" + + - name: Health check (with retry) + run: | + for i in 1 2 3 4 5; do + sleep 2 + if curl -sf --max-time 5 ${{ env.HEALTH_URL }} > /dev/null 2>&1; then + echo "Health check passed (attempt $i)" + exit 0 + fi + echo "Health check attempt $i failed, retrying..." 
+ done + echo "Health check failed after 5 attempts" + exit 1 + + - name: Rollback on failure + if: failure() + run: | + echo "Deployment failed -- rolling back to previous binary" + ssh ${{ env.DEPLOY_USER }}@${{ env.DEPLOY_HOST }} \ + "if [ -f ${{ env.BINARY_PATH }}.bak ]; then \ + mv ${{ env.BINARY_PATH }}.bak ${{ env.BINARY_PATH }} && \ + systemctl restart ${{ env.SERVICE_NAME }} && \ + echo 'Rollback complete'; \ + else \ + echo 'No backup found -- manual intervention required'; \ + exit 1; \ + fi" + + - name: Verify rollback health + if: failure() + run: | + sleep 3 + if curl -sf --max-time 5 ${{ env.HEALTH_URL }} > /dev/null 2>&1; then + echo "Rollback health check passed -- service restored" + else + echo "WARNING: Rollback health check failed -- service may be down" + exit 1 + fi + + - name: Clean up backup + if: success() + run: | + ssh ${{ env.DEPLOY_USER }}@${{ env.DEPLOY_HOST }} \ + "rm -f ${{ env.BINARY_PATH }}.bak" diff --git a/.gitignore b/.gitignore index 3fabcb4..ab78580 100644 --- a/.gitignore +++ b/.gitignore @@ -22,6 +22,9 @@ report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json .env.production.local .env.local +# auth tokens (secrets) +tokens.json + # caches .eslintcache .cache diff --git a/src/daemon/auth-provider.ts b/src/daemon/auth-provider.ts new file mode 100644 index 0000000..a6f9f2a --- /dev/null +++ b/src/daemon/auth-provider.ts @@ -0,0 +1,256 @@ +/** + * Pluggable auth provider interface and token-based RBAC implementation. + * Designed for provider swap: token-based now, OAuth/OIDC later. + */ +import { createLogger } from "../logger/index.ts"; + +const log = createLogger("auth-provider"); + +// --- Interfaces (stable contract -- providers implement these) --- + +export type Role = "admin" | "agent" | "viewer"; + +export interface AuthContext { + userId: string; + role: Role; +} + +/** + * Auth provider interface. Implementations authenticate requests + * and return an AuthContext or null (unauthorized). 
+ * Designed for drop-in replacement: TokenAuthProvider now, OAuthProvider later. + */ +export interface AuthProvider { + /** Authenticate a request. Returns AuthContext if valid, null if not. */ + authenticate(req: Request): AuthContext | null; + /** Whether auth is enabled (at least one credential configured). */ + readonly enabled: boolean; +} + +// --- Role hierarchy and permission checks --- + +/** Permissions by role. Higher roles inherit lower permissions. */ +const ROLE_PERMISSIONS: Record> = { + viewer: new Set(["list", "status"]), + agent: new Set(["list", "status", "call", "list-tools", "schema"]), + admin: new Set(["list", "status", "call", "list-tools", "schema", "add", "update", "remove", "reload", "import", "shutdown"]), +}; + +/** Check if a role has a specific permission. */ +export function hasPermission(role: Role, permission: string): boolean { + return ROLE_PERMISSIONS[role]?.has(permission) ?? false; +} + +// --- Token config schema --- + +export interface TokenEntry { + id: string; + token: string; + role: Role; + description?: string; + username?: string; + password?: string; +} + +export interface TokensConfig { + tokens: TokenEntry[]; +} + +const VALID_ROLES = new Set(["admin", "agent", "viewer"]); + +function validateTokensConfig(raw: unknown): TokensConfig { + if (!raw || typeof raw !== "object") { + throw new Error("Tokens config must be an object"); + } + const obj = raw as Record; + if (!Array.isArray(obj.tokens)) { + throw new Error("Tokens config must have a 'tokens' array"); + } + const tokens: TokenEntry[] = []; + const seenIds = new Set(); + const seenTokens = new Set(); + + for (const [i, entry] of obj.tokens.entries()) { + if (!entry || typeof entry !== "object") { + throw new Error(`Token entry ${i} must be an object`); + } + const e = entry as Record; + if (typeof e.id !== "string" || !e.id) { + throw new Error(`Token entry ${i} missing 'id'`); + } + if (typeof e.token !== "string" || !e.token) { + throw new Error(`Token 
entry ${i} missing 'token'`); + } + if (typeof e.role !== "string" || !VALID_ROLES.has(e.role)) { + throw new Error(`Token entry ${i} has invalid role '${e.role}' (must be admin, agent, or viewer)`); + } + if (seenIds.has(e.id)) { + throw new Error(`Duplicate token id: ${e.id}`); + } + if (seenTokens.has(e.token)) { + throw new Error(`Duplicate token value for id: ${e.id}`); + } + seenIds.add(e.id); + seenTokens.add(e.token); + // Validate username/password pairing: if one is set, both must be + if ((e.username !== undefined) !== (e.password !== undefined)) { + throw new Error(`Token entry ${i} ('${e.id}') has username without password or vice versa`); + } + if (e.username !== undefined && typeof e.username !== "string") { + throw new Error(`Token entry ${i} ('${e.id}') has non-string username`); + } + if (e.password !== undefined && typeof e.password !== "string") { + throw new Error(`Token entry ${i} ('${e.id}') has non-string password`); + } + + tokens.push({ + id: e.id, + token: e.token, + role: e.role as Role, + description: typeof e.description === "string" ? e.description : undefined, + username: typeof e.username === "string" ? e.username : undefined, + password: typeof e.password === "string" ? e.password : undefined, + }); + } + + return { tokens }; +} + +// --- TokenAuthProvider implementation --- + +/** + * Token-based auth provider with multi-user RBAC. + * Supports two modes: + * 1. Legacy: single MCP2CLI_AUTH_TOKEN env var (treated as admin) + * 2. Multi-user: tokens.json config file with id/token/role entries + * + * Uses timing-safe comparison to prevent timing attacks. 
+ */ +export class TokenAuthProvider implements AuthProvider { + private tokenMap: Map; + /** Map username -> { password, token, context } for basic auth login */ + private userMap: Map; + readonly enabled: boolean; + + constructor(entries: TokenEntry[]) { + this.tokenMap = new Map(); + this.userMap = new Map(); + for (const entry of entries) { + this.tokenMap.set(entry.token, { userId: entry.id, role: entry.role }); + if (entry.username && entry.password) { + this.userMap.set(entry.username, { + password: entry.password, + token: entry.token, + ctx: { userId: entry.id, role: entry.role }, + }); + } + } + this.enabled = this.tokenMap.size > 0; + } + + authenticate(req: Request): AuthContext | null { + if (!this.enabled) return null; + + const authHeader = req.headers.get("authorization"); + if (!authHeader) return null; + + const match = authHeader.match(/^Bearer\s+(.+)$/i); + if (!match) return null; + + const provided = match[1]!; + + // Timing-safe: check against ALL tokens to prevent timing leaks + let found: AuthContext | null = null; + for (const [token, ctx] of this.tokenMap) { + if (timingSafeEqual(provided, token)) { + found = ctx; + } + } + + return found; + } + + /** + * Authenticate via username + password (for UI login form). + * Returns { ctx, token } on success (token is the bearer token for subsequent requests). + * Uses timing-safe comparison to prevent timing attacks. + */ + authenticateBasic(username: string, password: string): { ctx: AuthContext; token: string } | null { + // Timing-safe: always iterate all entries to prevent user-enumeration timing leaks + let found: { ctx: AuthContext; token: string } | null = null; + for (const [uname, entry] of this.userMap) { + const nameMatch = timingSafeEqual(username, uname); + const passMatch = timingSafeEqual(password, entry.password); + if (nameMatch && passMatch) { + found = { ctx: entry.ctx, token: entry.token }; + } + } + return found; + } + + /** + * Load from legacy single-token env var. 
+ * The token gets admin role with userId "default". + */ + static fromEnvToken(token: string): TokenAuthProvider { + return new TokenAuthProvider([{ id: "default", token, role: "admin" }]); + } + + /** + * Load from tokens.json config file. + * Falls back to legacy MCP2CLI_AUTH_TOKEN if file doesn't exist. + * Returns a disabled provider if neither is configured. + */ + static async load(tokensPath?: string): Promise { + // Try tokens.json first + const path = tokensPath ?? getTokensPath(); + const file = Bun.file(path); + const exists = await file.exists(); + + if (exists) { + try { + const raw = await file.json(); + const config = validateTokensConfig(raw); + log.info("tokens_loaded", { path, count: config.tokens.length }); + return new TokenAuthProvider(config.tokens); + } catch (err) { + const msg = err instanceof Error ? err.message : String(err); + log.error("tokens_load_failed", { path, error: msg }); + throw err; + } + } + + // Fall back to legacy single-token env var + const envToken = process.env.MCP2CLI_AUTH_TOKEN; + if (envToken) { + log.info("using_legacy_token", { source: "MCP2CLI_AUTH_TOKEN" }); + return TokenAuthProvider.fromEnvToken(envToken); + } + + // No auth configured + log.warn("no_auth_configured"); + return new TokenAuthProvider([]); + } +} + +/** Resolve tokens config file path. */ +function getTokensPath(): string { + if (process.env.MCP2CLI_TOKENS_FILE) { + return process.env.MCP2CLI_TOKENS_FILE; + } + const home = process.env.HOME ?? ""; + return `${home}/.config/mcp2cli/tokens.json`; +} + +/** Timing-safe string comparison. */ +function timingSafeEqual(a: string, b: string): boolean { + const encoder = new TextEncoder(); + const bufA = encoder.encode(a); + const bufB = encoder.encode(b); + const len = Math.max(bufA.length, bufB.length); + let mismatch = bufA.length !== bufB.length ? 1 : 0; + for (let i = 0; i < len; i++) { + mismatch |= (bufA[i] ?? 0) ^ (bufB[i] ?? 
0); + } + return mismatch === 0; +} diff --git a/src/daemon/auth.ts b/src/daemon/auth.ts index 273e76e..158f5aa 100644 --- a/src/daemon/auth.ts +++ b/src/daemon/auth.ts @@ -1,18 +1,20 @@ /** - * Bearer token authentication for TCP daemon mode. - * Uses timing-safe comparison to prevent timing attacks. - * /health is exempt from auth for load balancer probes. + * Authentication and authorization for the daemon HTTP server. + * Supports both legacy single-token and multi-user RBAC via AuthProvider. */ import { createLogger } from "../logger/index.ts"; +import type { AuthContext, AuthProvider } from "./auth-provider.ts"; +import { hasPermission } from "./auth-provider.ts"; const log = createLogger("auth"); -/** Paths that skip authentication (health checks, metrics scraping) */ -const AUTH_EXEMPT_PATHS = new Set(["/health", "/metrics"]); +/** Paths that skip authentication (health probes, metrics scraping, UI shell) */ +const AUTH_EXEMPT_PATHS = new Set(["/health", "/metrics", "/", "/api/auth/login"]); /** * Load the auth token from MCP2CLI_AUTH_TOKEN env var. * Returns undefined if not set (auth disabled). + * @deprecated Use TokenAuthProvider.load() for multi-user support. */ export function loadAuthToken(): string | undefined { return process.env.MCP2CLI_AUTH_TOKEN || undefined; @@ -20,18 +22,18 @@ export function loadAuthToken(): string | undefined { /** * Check if a request path is exempt from authentication. + * Exempt: /health, /metrics, / (UI HTML shell). + * All /api/* and tool endpoints require bearer token. */ export function isAuthExempt(path: string): boolean { return AUTH_EXEMPT_PATHS.has(path); } /** - * Validate bearer token from request Authorization header. - * Returns true if auth is disabled (no token configured) or token matches. - * Uses timing-safe comparison to prevent timing attacks. + * Legacy auth check -- validates bearer token against a single expected value. + * @deprecated Use authenticateRequest() with AuthProvider for RBAC. 
*/ export function checkAuth(req: Request, expectedToken: string | undefined): boolean { - // No token configured -- auth disabled if (!expectedToken) return true; const authHeader = req.headers.get("authorization"); @@ -40,7 +42,6 @@ export function checkAuth(req: Request, expectedToken: string | undefined): bool return false; } - // Extract bearer token const match = authHeader.match(/^Bearer\s+(.+)$/i); if (!match) { log.warn("auth_malformed", { path: new URL(req.url).pathname }); @@ -56,23 +57,75 @@ export function checkAuth(req: Request, expectedToken: string | undefined): bool return true; } +// --- Provider-aware auth (new) --- + +/** + * Authenticate a request using the pluggable AuthProvider. + * Returns AuthContext if authenticated, null if not. + * When provider is disabled (no credentials configured), returns a default admin context. + */ +export function authenticateRequest(req: Request, provider: AuthProvider): AuthContext | null { + if (!provider.enabled) { + // No auth configured -- allow all as admin (backward compat) + return { userId: "anonymous", role: "admin" }; + } + + const ctx = provider.authenticate(req); + if (!ctx) { + log.warn("auth_failed", { path: new URL(req.url).pathname }); + } + return ctx; +} + +/** Map request paths to permission names for RBAC checks. 
*/ +const PATH_PERMISSIONS: Array<{ pattern: RegExp; method: string; permission: string }> = [ + // Tool endpoints + { pattern: /^\/call$/, method: "POST", permission: "call" }, + { pattern: /^\/list-tools$/, method: "POST", permission: "list-tools" }, + { pattern: /^\/schema$/, method: "POST", permission: "schema" }, + { pattern: /^\/shutdown$/, method: "POST", permission: "shutdown" }, + // Management API + { pattern: /^\/api\/services$/, method: "GET", permission: "list" }, + { pattern: /^\/api\/services$/, method: "POST", permission: "add" }, + { pattern: /^\/api\/services\/reload$/, method: "POST", permission: "reload" }, + { pattern: /^\/api\/services\/import$/, method: "POST", permission: "import" }, + { pattern: /^\/api\/services\/[^/]+$/, method: "PUT", permission: "update" }, + { pattern: /^\/api\/services\/[^/]+$/, method: "DELETE", permission: "remove" }, + { pattern: /^\/api\/services\/[^/]+\/status$/, method: "GET", permission: "status" }, +]; + /** - * Timing-safe string comparison. - * Compares all bytes regardless of where a mismatch occurs. + * Check if an authenticated user has permission for a given request. + * Returns the required permission name if denied, null if allowed. */ +export function checkPermission(req: Request, ctx: AuthContext): string | null { + const url = new URL(req.url); + const path = url.pathname; + const method = req.method; + + for (const rule of PATH_PERMISSIONS) { + if (rule.method === method && rule.pattern.test(path)) { + if (!hasPermission(ctx.role, rule.permission)) { + log.warn("permission_denied", { userId: ctx.userId, role: ctx.role, permission: rule.permission, path }); + return rule.permission; + } + return null; + } + } + + // No matching rule -- allow by default (path exemption already handled upstream) + return null; +} + +/** Timing-safe string comparison. 
*/ function timingSafeEqual(a: string, b: string): boolean { const encoder = new TextEncoder(); const bufA = encoder.encode(a); const bufB = encoder.encode(b); - - // Length difference leaks information, but we still compare all bytes - // of the longer string to maintain constant time const len = Math.max(bufA.length, bufB.length); let mismatch = bufA.length !== bufB.length ? 1 : 0; - for (let i = 0; i < len; i++) { mismatch |= (bufA[i] ?? 0) ^ (bufB[i] ?? 0); } - return mismatch === 0; } diff --git a/src/daemon/config-manager.ts b/src/daemon/config-manager.ts new file mode 100644 index 0000000..ebb877b --- /dev/null +++ b/src/daemon/config-manager.ts @@ -0,0 +1,250 @@ +/** + * Runtime config management with CRUD operations, disk persistence, + * and remote import support. Wraps in-memory config with Zod validation. + */ +import { ServicesConfigSchema, ServiceSchema } from "../config/index.ts"; +import type { ServicesConfig, ServiceConfig } from "../config/index.ts"; +import { getConfigPath } from "../config/index.ts"; +import { createLogger } from "../logger/index.ts"; +import type { ConnectionPool } from "./pool.ts"; + +const log = createLogger("config-manager"); + +export class ConfigManager { + private config: ServicesConfig; + private configPath: string; + private pool: ConnectionPool | null = null; + private writeLock = false; + + constructor(initialConfig: ServicesConfig, configPath?: string) { + this.config = initialConfig; + this.configPath = configPath ?? getConfigPath(); + } + + /** Attach pool reference for connection lifecycle management. */ + setPool(pool: ConnectionPool): void { + this.pool = pool; + } + + /** Get current services config (read-only snapshot). */ + getServices(): ServicesConfig { + return structuredClone(this.config); + } + + /** Get a single service config by name, or null if not found. */ + getService(name: string): ServiceConfig | null { + return this.config.services[name] ?? null; + } + + /** List service names. 
*/ + get serviceNames(): string[] { + return Object.keys(this.config.services); + } + + /** + * Add a new service. Validates config via Zod, writes to disk. + * Throws if the service name already exists. + */ + async addService(name: string, serviceConfig: unknown): Promise { + if (this.config.services[name]) { + throw new ConfigManagerError(`Service already exists: ${name}`); + } + + const validated = this.validateServiceConfig(serviceConfig); + this.config.services[name] = validated; + await this.writeToDisk(); + log.info("service_added", { name, backend: validated.backend }); + } + + /** + * Update an existing service. Validates config via Zod, writes to disk. + * Closes existing connection so pool lazily reconnects with new config. + */ + async updateService(name: string, serviceConfig: unknown): Promise { + if (!this.config.services[name]) { + throw new ConfigManagerError(`Service not found: ${name}`); + } + + const validated = this.validateServiceConfig(serviceConfig); + + // Close existing connection before updating config + if (this.pool) { + await this.pool.closeService(name); + } + + this.config.services[name] = validated; + await this.writeToDisk(); + log.info("service_updated", { name, backend: validated.backend }); + } + + /** + * Remove a service. Closes its connection, removes from config, writes to disk. + */ + async removeService(name: string): Promise { + if (!this.config.services[name]) { + throw new ConfigManagerError(`Service not found: ${name}`); + } + + // Close connection first + if (this.pool) { + await this.pool.closeService(name); + } + + delete this.config.services[name]; + await this.writeToDisk(); + log.info("service_removed", { name }); + } + + /** + * Reload config from disk. Closes connections for removed services. + * Used after external edits (e.g., git pull). 
+ */ + async reloadFromDisk(): Promise<{ added: string[]; removed: string[]; updated: string[] }> { + const file = Bun.file(this.configPath); + const exists = await file.exists(); + if (!exists) { + throw new ConfigManagerError(`Config file not found: ${this.configPath}`); + } + + const raw = await file.json(); + const result = ServicesConfigSchema.safeParse(raw); + if (!result.success) { + const issues = result.error.issues.map((i) => `${i.path.join(".")}: ${i.message}`).join(", "); + throw new ConfigManagerError(`Config validation failed: ${issues}`); + } + + const newConfig = result.data; + const diff = this.diffConfigs(this.config, newConfig); + + // Close connections for removed and updated services + if (this.pool) { + for (const name of [...diff.removed, ...diff.updated]) { + await this.pool.closeService(name); + } + } + + this.config = newConfig; + log.info("config_reloaded", diff); + return diff; + } + + /** + * Import services from a remote URL. Fetches JSON, validates, merges or replaces. + * Supports raw GitHub URLs, any HTTP endpoint serving valid services.json. 
+ */ + async importFromUrl( + url: string, + mode: "merge" | "replace" = "merge", + ): Promise<{ added: string[]; removed: string[]; updated: string[] }> { + log.info("importing_config", { url, mode }); + + const response = await fetch(url); + if (!response.ok) { + throw new ConfigManagerError(`Failed to fetch config from ${url}: ${response.status} ${response.statusText}`); + } + + const raw = await response.json(); + const result = ServicesConfigSchema.safeParse(raw); + if (!result.success) { + const issues = result.error.issues.map((i) => `${i.path.join(".")}: ${i.message}`).join(", "); + throw new ConfigManagerError(`Imported config validation failed: ${issues}`); + } + + const importedConfig = result.data; + + if (mode === "replace") { + const diff = this.diffConfigs(this.config, importedConfig); + + // Close all existing connections for removed/updated services + if (this.pool) { + for (const name of [...diff.removed, ...diff.updated]) { + await this.pool.closeService(name); + } + } + + this.config = importedConfig; + await this.writeToDisk(); + log.info("config_imported_replace", diff); + return diff; + } + + // Merge mode: add new, update existing, keep unlisted + const added: string[] = []; + const updated: string[] = []; + + for (const [name, svcConfig] of Object.entries(importedConfig.services)) { + if (this.config.services[name]) { + // Close connection before updating + if (this.pool) { + await this.pool.closeService(name); + } + this.config.services[name] = svcConfig; + updated.push(name); + } else { + this.config.services[name] = svcConfig; + added.push(name); + } + } + + await this.writeToDisk(); + const diff = { added, removed: [], updated }; + log.info("config_imported_merge", diff); + return diff; + } + + /** + * Build a raw GitHub URL from repo/branch/path components. 
+ */ + static buildGitHubRawUrl(repo: string, branch = "main", path = "services.json"): string { + // Handle both "owner/repo" and full "https://github.com/owner/repo" formats + const repoPath = repo.replace(/^https?:\/\/github\.com\//, "").replace(/\.git$/, ""); + return `https://raw.githubusercontent.com/${repoPath}/${branch}/${path}`; + } + + // -- Private helpers -- + + private validateServiceConfig(raw: unknown): ServiceConfig { + const result = ServiceSchema.safeParse(raw); + if (!result.success) { + const issues = result.error.issues.map((i) => `${i.path.join(".")}: ${i.message}`).join(", "); + throw new ConfigManagerError(`Invalid service config: ${issues}`); + } + return result.data; + } + + private async writeToDisk(): Promise { + if (this.writeLock) { + throw new ConfigManagerError("Concurrent write detected -- try again"); + } + this.writeLock = true; + try { + await Bun.write(this.configPath, JSON.stringify(this.config, null, 2) + "\n"); + log.debug("config_written", { path: this.configPath }); + } finally { + this.writeLock = false; + } + } + + private diffConfigs( + oldConfig: ServicesConfig, + newConfig: ServicesConfig, + ): { added: string[]; removed: string[]; updated: string[] } { + const oldNames = new Set(Object.keys(oldConfig.services)); + const newNames = new Set(Object.keys(newConfig.services)); + + const added = [...newNames].filter((n) => !oldNames.has(n)); + const removed = [...oldNames].filter((n) => !newNames.has(n)); + const updated = [...newNames].filter( + (n) => oldNames.has(n) && JSON.stringify(oldConfig.services[n]) !== JSON.stringify(newConfig.services[n]), + ); + + return { added, removed, updated }; + } +} + +export class ConfigManagerError extends Error { + constructor(message: string) { + super(message); + this.name = "ConfigManagerError"; + } +} diff --git a/src/daemon/index.ts b/src/daemon/index.ts index 3cff0bb..a442257 100644 --- a/src/daemon/index.ts +++ b/src/daemon/index.ts @@ -10,10 +10,11 @@ import { getDaemonPaths, 
getDaemonListenConfig } from "./paths.ts"; import { ConnectionPool } from "./pool.ts"; import { IdleTimer } from "./idle.ts"; import { createDaemonServer } from "./server.ts"; -import { loadConfig } from "../config/index.ts"; +import { loadConfig, getConfigPath } from "../config/index.ts"; import { createLogger } from "../logger/index.ts"; -import { loadAuthToken } from "./auth.ts"; import { MetricsCollector } from "./metrics.ts"; +import { ConfigManager } from "./config-manager.ts"; +import { TokenAuthProvider } from "./auth-provider.ts"; const log = createLogger("daemon"); @@ -56,11 +57,14 @@ export async function startDaemon(): Promise { // Load service configuration const config = await loadConfig(); - // Load auth token - const authToken = loadAuthToken(); - if (isTcp && !authToken) { - log.warn("no_auth_token", { - message: "TCP mode without MCP2CLI_AUTH_TOKEN -- daemon is unauthenticated", + // Create config manager for runtime CRUD (wraps the loaded config) + const configManager = new ConfigManager(config, getConfigPath()); + + // Load auth provider (tokens.json or legacy MCP2CLI_AUTH_TOKEN) + const authProvider = await TokenAuthProvider.load(); + if (isTcp && !authProvider.enabled) { + log.warn("no_auth_configured", { + message: "TCP mode without auth -- daemon is unauthenticated. Set MCP2CLI_AUTH_TOKEN or create tokens.json", }); } @@ -68,6 +72,9 @@ export async function startDaemon(): Promise { const pool = new ConnectionPool(); const metrics = new MetricsCollector(); + // Wire pool into config manager for connection lifecycle + configManager.setPool(pool); + // Parse idle timeout from env (seconds -> ms) // TCP mode: default to 0 (disabled) since it's a long-running network service const defaultTimeout = isTcp ? 
0 : DEFAULT_IDLE_TIMEOUT_S; @@ -120,11 +127,12 @@ export async function startDaemon(): Promise { listenConfig, pool, config, + configManager, idleTimer, onShutdown: () => { void gracefulShutdown(); }, - authToken, + authProvider, metrics, }); diff --git a/src/daemon/server.ts b/src/daemon/server.ts index 3951ecb..d2d7a49 100644 --- a/src/daemon/server.ts +++ b/src/daemon/server.ts @@ -21,8 +21,12 @@ import { ConnectionError } from "../connection/errors.ts"; import { ToolError } from "../invocation/errors.ts"; import type { ErrorCode } from "../types/index.ts"; import { createLogger } from "../logger/index.ts"; -import { checkAuth, isAuthExempt } from "./auth.ts"; +import { isAuthExempt, authenticateRequest, checkPermission } from "./auth.ts"; +import { TokenAuthProvider } from "./auth-provider.ts"; +import type { AuthProvider, AuthContext } from "./auth-provider.ts"; import type { MetricsCollector } from "./metrics.ts"; +import { ConfigManager, ConfigManagerError } from "./config-manager.ts"; +import { renderUI } from "./ui.ts"; const log = createLogger("server"); const reqLog = createLogger("daemon:request"); @@ -31,9 +35,10 @@ interface DaemonServerOptions { listenConfig: DaemonListenConfig; pool: ConnectionPool; config: ServicesConfig; + configManager?: ConfigManager; idleTimer: IdleTimer; onShutdown: () => void; - authToken: string | undefined; + authProvider: AuthProvider; metrics: MetricsCollector; } @@ -55,7 +60,10 @@ function errorResponse( * Returns the Bun.serve() server instance. */ export function createDaemonServer(opts: DaemonServerOptions) { - const { listenConfig, pool, config, idleTimer, onShutdown, authToken, metrics } = opts; + const { listenConfig, pool, config, configManager, idleTimer, onShutdown, authProvider, metrics } = opts; + + // Use configManager's live config for pool lookups when available + const getConfig = (): ServicesConfig => configManager ? 
configManager.getServices() : config; // Build listen options based on mode const listenOpts = listenConfig.mode === "unix" @@ -71,9 +79,18 @@ export function createDaemonServer(opts: DaemonServerOptions) { log.debug("request received", { method: req.method, path }); // Auth check (exempt paths skip this) - if (!isAuthExempt(path) && !checkAuth(req, authToken)) { - metrics.onAuthFailure(); - return errorResponse("AUTH_ERROR", "Unauthorized", undefined, 401); + let authCtx: AuthContext | null = null; + if (!isAuthExempt(path)) { + authCtx = authenticateRequest(req, authProvider); + if (!authCtx) { + metrics.onAuthFailure(); + return errorResponse("AUTH_ERROR", "Unauthorized", undefined, 401); + } + // RBAC permission check + const denied = checkPermission(req, authCtx); + if (denied) { + return errorResponse("AUTH_ERROR", `Permission denied: ${denied} requires higher role`, undefined, 403); + } } // POST /call -- invoke a tool @@ -88,7 +105,7 @@ export function createDaemonServer(opts: DaemonServerOptions) { const body = (await req.json()) as DaemonCallRequest; callService = body.service; callTool = body.tool; - const conn = await pool.getConnection(body.service, config); + const conn = await pool.getConnection(body.service, getConfig()); // MEM-02: AbortSignal timeout on tool calls (default 30s, configurable) const timeout = parseInt(process.env.MCP2CLI_TOOL_TIMEOUT ?? 
"30000", 10); @@ -147,7 +164,7 @@ export function createDaemonServer(opts: DaemonServerOptions) { idleTimer.onRequestStart(); try { const body = (await req.json()) as DaemonListToolsRequest; - const conn = await pool.getConnection(body.service, config); + const conn = await pool.getConnection(body.service, getConfig()); const tools = await listToolsForService(conn.client); return Response.json({ success: true, result: tools }); } catch (err) { @@ -162,7 +179,7 @@ export function createDaemonServer(opts: DaemonServerOptions) { idleTimer.onRequestStart(); try { const body = (await req.json()) as DaemonSchemaRequest; - const conn = await pool.getConnection(body.service, config); + const conn = await pool.getConnection(body.service, getConfig()); const result = await getToolSchema( conn.client, body.tool, @@ -215,6 +232,188 @@ export function createDaemonServer(opts: DaemonServerOptions) { return Response.json({ status: "shutting_down" }); } + // POST /api/auth/login -- exchange username+password for bearer token (auth-exempt) + if (path === "/api/auth/login" && req.method === "POST") { + try { + const body = await req.json() as { username?: string; password?: string }; + if (!body.username || !body.password) { + return errorResponse("INPUT_VALIDATION_ERROR", "Missing username or password", undefined, 400); + } + if (!(authProvider instanceof TokenAuthProvider)) { + return errorResponse("AUTH_ERROR", "Login not supported with current auth provider", undefined, 501); + } + const result = authProvider.authenticateBasic(body.username, body.password); + if (!result) { + metrics.onAuthFailure(); + return Response.json({ success: false, error: "Invalid username or password" }, { status: 401 }); + } + return Response.json({ + success: true, + token: result.token, + userId: result.ctx.userId, + role: result.ctx.role, + }); + } catch { + return errorResponse("INPUT_VALIDATION_ERROR", "Invalid request body", undefined, 400); + } + } + + // GET /api/auth/me -- returns current 
user identity and role + if (path === "/api/auth/me" && req.method === "GET") { + return Response.json({ + success: true, + userId: authCtx?.userId ?? "anonymous", + role: authCtx?.role ?? "admin", + }); + } + + // --- Management API routes (require configManager) --- + if (configManager) { + // GET / -- Web UI + if (path === "/" && req.method === "GET") { + return new Response(renderUI(), { + headers: { "Content-Type": "text/html; charset=utf-8" }, + }); + } + + // GET /api/services -- list all services with connection status + if (path === "/api/services" && req.method === "GET") { + const cfg = configManager.getServices(); + const services = Object.entries(cfg.services).map(([name, svc]) => ({ + name, + backend: svc.backend, + connected: pool.serviceNames.includes(name), + ...(svc.backend !== "stdio" && "url" in svc ? { url: svc.url } : {}), + })); + return Response.json({ success: true, services }); + } + + // POST /api/services -- add a service { name, config } + if (path === "/api/services" && req.method === "POST") { + try { + const body = await req.json() as { name: string; config: unknown }; + if (!body.name || !body.config) { + return errorResponse("INPUT_VALIDATION_ERROR", "Missing 'name' or 'config' field", undefined, 400); + } + await configManager.addService(body.name, body.config); + return Response.json({ success: true, message: `Service '${body.name}' added` }, { status: 201 }); + } catch (err) { + if (err instanceof ConfigManagerError) { + return errorResponse("INPUT_VALIDATION_ERROR", err.message, undefined, 400); + } + return errorResponse("INTERNAL_ERROR", err instanceof Error ? 
err.message : String(err)); + } + } + + // PUT /api/services/:name -- update a service + const putMatch = path.match(/^\/api\/services\/([^/]+)$/); + if (putMatch && req.method === "PUT") { + try { + const name = decodeURIComponent(putMatch[1]!); + const body = await req.json() as { config: unknown }; + if (!body.config) { + return errorResponse("INPUT_VALIDATION_ERROR", "Missing 'config' field", undefined, 400); + } + await configManager.updateService(name, body.config); + return Response.json({ success: true, message: `Service '${name}' updated` }); + } catch (err) { + if (err instanceof ConfigManagerError) { + return errorResponse("INPUT_VALIDATION_ERROR", err.message, undefined, 400); + } + return errorResponse("INTERNAL_ERROR", err instanceof Error ? err.message : String(err)); + } + } + + // DELETE /api/services/:name -- remove a service + const deleteMatch = path.match(/^\/api\/services\/([^/]+)$/); + if (deleteMatch && req.method === "DELETE") { + try { + const name = decodeURIComponent(deleteMatch[1]!); + await configManager.removeService(name); + return Response.json({ success: true, message: `Service '${name}' removed` }); + } catch (err) { + if (err instanceof ConfigManagerError) { + return errorResponse("INPUT_VALIDATION_ERROR", err.message, undefined, 400); + } + return errorResponse("INTERNAL_ERROR", err instanceof Error ? 
err.message : String(err)); + } + } + + // GET /api/services/:name/status -- connection health + tool count + const statusMatch = path.match(/^\/api\/services\/([^/]+)\/status$/); + if (statusMatch && req.method === "GET") { + try { + const name = decodeURIComponent(statusMatch[1]!); + const svc = configManager.getService(name); + if (!svc) { + return errorResponse("UNKNOWN_COMMAND", `Service not found: ${name}`, undefined, 404); + } + const connected = pool.serviceNames.includes(name); + let toolCount = 0; + if (connected) { + try { + const conn = await pool.getConnection(name, getConfig()); + const tools = await listToolsForService(conn.client); + toolCount = tools.length; + } catch { /* connection may have gone stale */ } + } + return Response.json({ + success: true, + name, + backend: svc.backend, + connected, + toolCount, + }); + } catch (err) { + return errorResponse("INTERNAL_ERROR", err instanceof Error ? err.message : String(err)); + } + } + + // POST /api/services/reload -- reload config from disk + if (path === "/api/services/reload" && req.method === "POST") { + try { + const diff = await configManager.reloadFromDisk(); + return Response.json({ success: true, ...diff }); + } catch (err) { + if (err instanceof ConfigManagerError) { + return errorResponse("INPUT_VALIDATION_ERROR", err.message, undefined, 400); + } + return errorResponse("INTERNAL_ERROR", err instanceof Error ? err.message : String(err)); + } + } + + // POST /api/services/import -- import from URL { url, mode?, repo?, branch?, path? } + if (path === "/api/services/import" && req.method === "POST") { + try { + const body = await req.json() as { + url?: string; + mode?: "merge" | "replace"; + repo?: string; + branch?: string; + path?: string; + }; + let importUrl = body.url; + if (!importUrl && body.repo) { + importUrl = ConfigManager.buildGitHubRawUrl( + body.repo, + body.branch ?? "main", + body.path ?? 
"services.json", + ); + } + if (!importUrl) { + return errorResponse("INPUT_VALIDATION_ERROR", "Missing 'url' or 'repo' field", undefined, 400); + } + const diff = await configManager.importFromUrl(importUrl, body.mode ?? "merge"); + return Response.json({ success: true, url: importUrl, ...diff }); + } catch (err) { + if (err instanceof ConfigManagerError) { + return errorResponse("INPUT_VALIDATION_ERROR", err.message, undefined, 400); + } + return errorResponse("INTERNAL_ERROR", err instanceof Error ? err.message : String(err)); + } + } + } + // Default: 404 return Response.json({ error: "not_found" }, { status: 404 }); }, diff --git a/src/daemon/ui.ts b/src/daemon/ui.ts new file mode 100644 index 0000000..20ac5ee --- /dev/null +++ b/src/daemon/ui.ts @@ -0,0 +1,626 @@ +/** + * Embedded web UI for mcp2cli daemon management. + * Single-file HTML dashboard -- no build step, no dependencies. + */ + +export function renderUI(): string { + return ` + + + + +mcp2cli - Service Manager + + + + + + + +
+
+ + +
+ +

mcp2cli

+
Service Manager
+ +
+
Status: --
+
Services: --
+
Connected: --
+
Uptime: --
+
+ +
+ + + +
+ + + + + + +
ServiceBackendStatusEndpointActions
Loading...
+ + + + + + + +
+
+ + + +`; +} diff --git a/tests/daemon/api.test.ts b/tests/daemon/api.test.ts new file mode 100644 index 0000000..c705de9 --- /dev/null +++ b/tests/daemon/api.test.ts @@ -0,0 +1,145 @@ +import { describe, test, expect, beforeEach, afterEach } from "bun:test"; +import { mkdtemp, rm } from "node:fs/promises"; +import { join } from "node:path"; +import { tmpdir } from "node:os"; +import { ConfigManager } from "../../src/daemon/config-manager.ts"; +import type { ServicesConfig } from "../../src/config/index.ts"; +import { isAuthExempt } from "../../src/daemon/auth.ts"; + +const STDIO_SERVICE = { + backend: "stdio" as const, + command: "/usr/bin/echo", + args: ["hello"], + env: {}, +}; + +function makeConfig(services: Record = {}): ServicesConfig { + return { services } as ServicesConfig; +} + +describe("Management API auth exemptions", () => { + test("/ is auth-exempt (UI shell)", () => { + expect(isAuthExempt("/")).toBe(true); + }); + + test("/health is auth-exempt", () => { + expect(isAuthExempt("/health")).toBe(true); + }); + + test("/metrics is auth-exempt", () => { + expect(isAuthExempt("/metrics")).toBe(true); + }); + + test("/api/services requires auth", () => { + expect(isAuthExempt("/api/services")).toBe(false); + }); + + test("/api/services/import requires auth", () => { + expect(isAuthExempt("/api/services/import")).toBe(false); + }); + + test("/api/services/myservice/status requires auth", () => { + expect(isAuthExempt("/api/services/myservice/status")).toBe(false); + }); + + test("/call requires auth", () => { + expect(isAuthExempt("/call")).toBe(false); + }); + + test("/shutdown requires auth", () => { + expect(isAuthExempt("/shutdown")).toBe(false); + }); +}); + +describe("ConfigManager API operations", () => { + let tmpDir: string; + let configPath: string; + let mgr: ConfigManager; + + beforeEach(async () => { + tmpDir = await mkdtemp(join(tmpdir(), "mcp2cli-api-test-")); + configPath = join(tmpDir, "services.json"); + const initial = makeConfig({ 
echo: STDIO_SERVICE }); + await Bun.write(configPath, JSON.stringify(initial, null, 2)); + mgr = new ConfigManager(initial, configPath); + }); + + afterEach(async () => { + await rm(tmpDir, { recursive: true, force: true }); + }); + + test("full CRUD lifecycle", async () => { + // List: starts with echo + const initial = mgr.getServices(); + expect(Object.keys(initial.services)).toEqual(["echo"]); + + // Add: new stdio service + await mgr.addService("cat", { + backend: "stdio", + command: "/bin/cat", + args: [], + env: {}, + }); + expect(mgr.serviceNames).toContain("cat"); + expect(mgr.serviceNames).toContain("echo"); + + // Update: change command + await mgr.updateService("cat", { + backend: "stdio", + command: "/usr/bin/cat", + args: ["-n"], + env: {}, + }); + const updated = mgr.getService("cat"); + expect((updated as typeof STDIO_SERVICE).command).toBe("/usr/bin/cat"); + + // Remove + await mgr.removeService("cat"); + expect(mgr.getService("cat")).toBeNull(); + expect(mgr.serviceNames).toEqual(["echo"]); + + // Verify final disk state + const onDisk = await Bun.file(configPath).json(); + expect(Object.keys(onDisk.services)).toEqual(["echo"]); + }); + + test("add rejects invalid backend", async () => { + expect( + mgr.addService("bad", { backend: "ftp", url: "ftp://localhost" }), + ).rejects.toThrow(); + }); + + test("add rejects stdio without command", async () => { + expect( + mgr.addService("bad", { backend: "stdio" }), + ).rejects.toThrow(); + }); + + test("add rejects http without url", async () => { + expect( + mgr.addService("bad", { backend: "http" }), + ).rejects.toThrow(); + }); + + test("add service with http backend", async () => { + await mgr.addService("web", { + backend: "http", + url: "http://localhost:8080/sse", + headers: { Authorization: "Bearer test" }, + }); + const svc = mgr.getService("web"); + expect(svc).not.toBeNull(); + expect(svc!.backend).toBe("http"); + }); + + test("add service with websocket backend", async () => { + await 
mgr.addService("ws", { + backend: "websocket", + url: "ws://localhost:9090", + headers: {}, + }); + const svc = mgr.getService("ws"); + expect(svc).not.toBeNull(); + expect(svc!.backend).toBe("websocket"); + }); +}); diff --git a/tests/daemon/auth-provider.test.ts b/tests/daemon/auth-provider.test.ts new file mode 100644 index 0000000..1df6fdf --- /dev/null +++ b/tests/daemon/auth-provider.test.ts @@ -0,0 +1,331 @@ +import { describe, test, expect, beforeEach, afterEach } from "bun:test"; +import { mkdtemp, rm } from "node:fs/promises"; +import { join } from "node:path"; +import { tmpdir } from "node:os"; +import { + TokenAuthProvider, + hasPermission, + type TokenEntry, +} from "../../src/daemon/auth-provider.ts"; +import { authenticateRequest, checkPermission } from "../../src/daemon/auth.ts"; + +function makeReq(path: string, method = "GET", token?: string): Request { + const headers: Record = {}; + if (token) headers["Authorization"] = `Bearer ${token}`; + return new Request(`http://localhost${path}`, { method, headers }); +} + +const ADMIN_TOKEN: TokenEntry = { id: "rico", token: "admin-secret", role: "admin" }; +const AGENT_TOKEN: TokenEntry = { id: "skippy", token: "agent-secret", role: "agent" }; +const ADMIN_WITH_LOGIN: TokenEntry = { + id: "rico", token: "admin-secret", role: "admin", + username: "rico", password: "s3cret-pass!", +}; +const AGENT_WITH_LOGIN: TokenEntry = { + id: "skippy", token: "agent-secret", role: "agent", + username: "skippy", password: "agent-pass-42", +}; + +describe("TokenAuthProvider", () => { + test("authenticates valid admin token", () => { + const provider = new TokenAuthProvider([ADMIN_TOKEN]); + const ctx = provider.authenticate(makeReq("/call", "POST", "admin-secret")); + expect(ctx).not.toBeNull(); + expect(ctx!.userId).toBe("rico"); + expect(ctx!.role).toBe("admin"); + }); + + test("authenticates valid agent token", () => { + const provider = new TokenAuthProvider([ADMIN_TOKEN, AGENT_TOKEN]); + const ctx = 
provider.authenticate(makeReq("/call", "POST", "agent-secret")); + expect(ctx).not.toBeNull(); + expect(ctx!.userId).toBe("skippy"); + expect(ctx!.role).toBe("agent"); + }); + + test("returns null for invalid token", () => { + const provider = new TokenAuthProvider([ADMIN_TOKEN]); + const ctx = provider.authenticate(makeReq("/call", "POST", "wrong-token")); + expect(ctx).toBeNull(); + }); + + test("returns null for missing auth header", () => { + const provider = new TokenAuthProvider([ADMIN_TOKEN]); + const ctx = provider.authenticate(makeReq("/call", "POST")); + expect(ctx).toBeNull(); + }); + + test("returns null for malformed auth header", () => { + const provider = new TokenAuthProvider([ADMIN_TOKEN]); + const req = new Request("http://localhost/call", { + method: "POST", + headers: { Authorization: "Basic abc" }, + }); + expect(provider.authenticate(req)).toBeNull(); + }); + + test("enabled is false with no tokens", () => { + const provider = new TokenAuthProvider([]); + expect(provider.enabled).toBe(false); + }); + + test("enabled is true with tokens", () => { + const provider = new TokenAuthProvider([ADMIN_TOKEN]); + expect(provider.enabled).toBe(true); + }); + + test("fromEnvToken creates admin provider", () => { + const provider = TokenAuthProvider.fromEnvToken("my-secret"); + const ctx = provider.authenticate(makeReq("/call", "POST", "my-secret")); + expect(ctx).not.toBeNull(); + expect(ctx!.userId).toBe("default"); + expect(ctx!.role).toBe("admin"); + }); + + test("case-insensitive Bearer prefix", () => { + const provider = new TokenAuthProvider([ADMIN_TOKEN]); + const req = new Request("http://localhost/call", { + method: "POST", + headers: { Authorization: "bearer admin-secret" }, + }); + expect(provider.authenticate(req)).not.toBeNull(); + }); +}); + +describe("TokenAuthProvider.authenticateBasic", () => { + test("authenticates valid username+password and returns token", () => { + const provider = new TokenAuthProvider([ADMIN_WITH_LOGIN, 
AGENT_WITH_LOGIN]); + const result = provider.authenticateBasic("rico", "s3cret-pass!"); + expect(result).not.toBeNull(); + expect(result!.ctx.userId).toBe("rico"); + expect(result!.ctx.role).toBe("admin"); + expect(result!.token).toBe("admin-secret"); + }); + + test("authenticates agent user", () => { + const provider = new TokenAuthProvider([ADMIN_WITH_LOGIN, AGENT_WITH_LOGIN]); + const result = provider.authenticateBasic("skippy", "agent-pass-42"); + expect(result).not.toBeNull(); + expect(result!.ctx.userId).toBe("skippy"); + expect(result!.ctx.role).toBe("agent"); + expect(result!.token).toBe("agent-secret"); + }); + + test("returns null for wrong password", () => { + const provider = new TokenAuthProvider([ADMIN_WITH_LOGIN]); + const result = provider.authenticateBasic("rico", "wrong-password"); + expect(result).toBeNull(); + }); + + test("returns null for wrong username", () => { + const provider = new TokenAuthProvider([ADMIN_WITH_LOGIN]); + const result = provider.authenticateBasic("nobody", "s3cret-pass!"); + expect(result).toBeNull(); + }); + + test("returns null for empty credentials", () => { + const provider = new TokenAuthProvider([ADMIN_WITH_LOGIN]); + expect(provider.authenticateBasic("", "")).toBeNull(); + expect(provider.authenticateBasic("rico", "")).toBeNull(); + expect(provider.authenticateBasic("", "s3cret-pass!")).toBeNull(); + }); + + test("ignores entries without username/password", () => { + const provider = new TokenAuthProvider([ADMIN_TOKEN]); // no username/password + const result = provider.authenticateBasic("rico", "admin-secret"); + expect(result).toBeNull(); + }); + + test("mixed entries: login only works for entries with credentials", () => { + const provider = new TokenAuthProvider([ADMIN_TOKEN, AGENT_WITH_LOGIN]); + // Token-only entry: no basic auth + expect(provider.authenticateBasic("rico", "admin-secret")).toBeNull(); + // Entry with login credentials: basic auth works + const result = provider.authenticateBasic("skippy", 
"agent-pass-42"); + expect(result).not.toBeNull(); + expect(result!.ctx.userId).toBe("skippy"); + }); +}); + +describe("TokenAuthProvider.load", () => { + let tmpDir: string; + + beforeEach(async () => { + tmpDir = await mkdtemp(join(tmpdir(), "mcp2cli-auth-test-")); + }); + + afterEach(async () => { + await rm(tmpDir, { recursive: true, force: true }); + delete process.env.MCP2CLI_TOKENS_FILE; + delete process.env.MCP2CLI_AUTH_TOKEN; + }); + + test("loads from tokens.json file", async () => { + const tokensPath = join(tmpDir, "tokens.json"); + await Bun.write(tokensPath, JSON.stringify({ + tokens: [ + { id: "rico", token: "t1", role: "admin" }, + { id: "skippy", token: "t2", role: "agent" }, + ], + })); + process.env.MCP2CLI_TOKENS_FILE = tokensPath; + const provider = await TokenAuthProvider.load(); + expect(provider.enabled).toBe(true); + expect(provider.authenticate(makeReq("/", "GET", "t1"))?.role).toBe("admin"); + expect(provider.authenticate(makeReq("/", "GET", "t2"))?.role).toBe("agent"); + }); + + test("falls back to MCP2CLI_AUTH_TOKEN", async () => { + process.env.MCP2CLI_TOKENS_FILE = join(tmpDir, "nonexistent.json"); + process.env.MCP2CLI_AUTH_TOKEN = "legacy-token"; + const provider = await TokenAuthProvider.load(); + expect(provider.enabled).toBe(true); + const ctx = provider.authenticate(makeReq("/", "GET", "legacy-token")); + expect(ctx?.userId).toBe("default"); + expect(ctx?.role).toBe("admin"); + }); + + test("returns disabled provider when nothing configured", async () => { + process.env.MCP2CLI_TOKENS_FILE = join(tmpDir, "nonexistent.json"); + delete process.env.MCP2CLI_AUTH_TOKEN; + const provider = await TokenAuthProvider.load(); + expect(provider.enabled).toBe(false); + }); + + test("rejects invalid tokens.json", async () => { + const tokensPath = join(tmpDir, "tokens.json"); + await Bun.write(tokensPath, JSON.stringify({ tokens: [{ id: "x" }] })); + process.env.MCP2CLI_TOKENS_FILE = tokensPath; + 
expect(TokenAuthProvider.load()).rejects.toThrow(); + }); + + test("rejects duplicate token ids", async () => { + const tokensPath = join(tmpDir, "tokens.json"); + await Bun.write(tokensPath, JSON.stringify({ + tokens: [ + { id: "same", token: "t1", role: "admin" }, + { id: "same", token: "t2", role: "agent" }, + ], + })); + process.env.MCP2CLI_TOKENS_FILE = tokensPath; + expect(TokenAuthProvider.load()).rejects.toThrow("Duplicate token id"); + }); + + test("loads entries with username/password", async () => { + const tokensPath = join(tmpDir, "tokens.json"); + await Bun.write(tokensPath, JSON.stringify({ + tokens: [ + { id: "rico", token: "t1", role: "admin", username: "rico", password: "pass1" }, + ], + })); + process.env.MCP2CLI_TOKENS_FILE = tokensPath; + const provider = await TokenAuthProvider.load(); + const result = provider.authenticateBasic("rico", "pass1"); + expect(result).not.toBeNull(); + expect(result!.ctx.userId).toBe("rico"); + expect(result!.token).toBe("t1"); + }); + + test("rejects username without password", async () => { + const tokensPath = join(tmpDir, "tokens.json"); + await Bun.write(tokensPath, JSON.stringify({ + tokens: [{ id: "x", token: "t", role: "admin", username: "x" }], + })); + process.env.MCP2CLI_TOKENS_FILE = tokensPath; + expect(TokenAuthProvider.load()).rejects.toThrow("username without password"); + }); + + test("rejects invalid role", async () => { + const tokensPath = join(tmpDir, "tokens.json"); + await Bun.write(tokensPath, JSON.stringify({ + tokens: [{ id: "x", token: "t", role: "superadmin" }], + })); + process.env.MCP2CLI_TOKENS_FILE = tokensPath; + expect(TokenAuthProvider.load()).rejects.toThrow("invalid role"); + }); +}); + +describe("hasPermission", () => { + test("admin has all permissions", () => { + expect(hasPermission("admin", "add")).toBe(true); + expect(hasPermission("admin", "remove")).toBe(true); + expect(hasPermission("admin", "import")).toBe(true); + expect(hasPermission("admin", "shutdown")).toBe(true); 
+ expect(hasPermission("admin", "call")).toBe(true); + expect(hasPermission("admin", "list")).toBe(true); + }); + + test("agent can call tools and list but not mutate config", () => { + expect(hasPermission("agent", "call")).toBe(true); + expect(hasPermission("agent", "list")).toBe(true); + expect(hasPermission("agent", "list-tools")).toBe(true); + expect(hasPermission("agent", "schema")).toBe(true); + expect(hasPermission("agent", "status")).toBe(true); + expect(hasPermission("agent", "add")).toBe(false); + expect(hasPermission("agent", "remove")).toBe(false); + expect(hasPermission("agent", "import")).toBe(false); + expect(hasPermission("agent", "reload")).toBe(false); + expect(hasPermission("agent", "shutdown")).toBe(false); + }); + + test("viewer can only list and check status", () => { + expect(hasPermission("viewer", "list")).toBe(true); + expect(hasPermission("viewer", "status")).toBe(true); + expect(hasPermission("viewer", "call")).toBe(false); + expect(hasPermission("viewer", "add")).toBe(false); + expect(hasPermission("viewer", "shutdown")).toBe(false); + }); +}); + +describe("authenticateRequest", () => { + test("returns admin context when provider is disabled", () => { + const provider = new TokenAuthProvider([]); + const ctx = authenticateRequest(makeReq("/call", "POST"), provider); + expect(ctx).not.toBeNull(); + expect(ctx!.role).toBe("admin"); + expect(ctx!.userId).toBe("anonymous"); + }); + + test("returns context for valid token", () => { + const provider = new TokenAuthProvider([AGENT_TOKEN]); + const ctx = authenticateRequest(makeReq("/call", "POST", "agent-secret"), provider); + expect(ctx).not.toBeNull(); + expect(ctx!.role).toBe("agent"); + }); + + test("returns null for invalid token", () => { + const provider = new TokenAuthProvider([ADMIN_TOKEN]); + const ctx = authenticateRequest(makeReq("/call", "POST", "wrong"), provider); + expect(ctx).toBeNull(); + }); +}); + +describe("checkPermission", () => { + test("admin can access all routes", 
() => { + const admin = { userId: "rico", role: "admin" as const }; + expect(checkPermission(makeReq("/api/services", "GET"), admin)).toBeNull(); + expect(checkPermission(makeReq("/api/services", "POST"), admin)).toBeNull(); + expect(checkPermission(makeReq("/api/services/foo", "DELETE"), admin)).toBeNull(); + expect(checkPermission(makeReq("/api/services/import", "POST"), admin)).toBeNull(); + expect(checkPermission(makeReq("/shutdown", "POST"), admin)).toBeNull(); + }); + + test("agent can list and call but not mutate", () => { + const agent = { userId: "skippy", role: "agent" as const }; + expect(checkPermission(makeReq("/api/services", "GET"), agent)).toBeNull(); + expect(checkPermission(makeReq("/call", "POST"), agent)).toBeNull(); + expect(checkPermission(makeReq("/list-tools", "POST"), agent)).toBeNull(); + expect(checkPermission(makeReq("/api/services", "POST"), agent)).toBe("add"); + expect(checkPermission(makeReq("/api/services/foo", "DELETE"), agent)).toBe("remove"); + expect(checkPermission(makeReq("/api/services/import", "POST"), agent)).toBe("import"); + expect(checkPermission(makeReq("/shutdown", "POST"), agent)).toBe("shutdown"); + }); + + test("viewer can only list and status", () => { + const viewer = { userId: "v", role: "viewer" as const }; + expect(checkPermission(makeReq("/api/services", "GET"), viewer)).toBeNull(); + expect(checkPermission(makeReq("/api/services/foo/status", "GET"), viewer)).toBeNull(); + expect(checkPermission(makeReq("/call", "POST"), viewer)).toBe("call"); + expect(checkPermission(makeReq("/api/services", "POST"), viewer)).toBe("add"); + }); +}); diff --git a/tests/daemon/config-manager.test.ts b/tests/daemon/config-manager.test.ts new file mode 100644 index 0000000..0b05755 --- /dev/null +++ b/tests/daemon/config-manager.test.ts @@ -0,0 +1,171 @@ +import { describe, test, expect, beforeEach, afterEach } from "bun:test"; +import { mkdtemp, rm } from "node:fs/promises"; +import { join } from "node:path"; +import { tmpdir } 
from "node:os"; +import { ConfigManager, ConfigManagerError } from "../../src/daemon/config-manager.ts"; +import type { ServicesConfig } from "../../src/config/index.ts"; + +const STDIO_SERVICE = { + backend: "stdio" as const, + command: "/usr/bin/echo", + args: ["hello"], + env: {}, +}; + +const HTTP_SERVICE = { + backend: "http" as const, + url: "http://localhost:3000/sse", + headers: {}, +}; + +function makeConfig(services: Record = {}): ServicesConfig { + return { services } as ServicesConfig; +} + +describe("ConfigManager", () => { + let tmpDir: string; + let configPath: string; + let mgr: ConfigManager; + + beforeEach(async () => { + tmpDir = await mkdtemp(join(tmpdir(), "mcp2cli-test-")); + configPath = join(tmpDir, "services.json"); + const initial = makeConfig({ echo: STDIO_SERVICE }); + await Bun.write(configPath, JSON.stringify(initial, null, 2)); + mgr = new ConfigManager(initial, configPath); + }); + + afterEach(async () => { + await rm(tmpDir, { recursive: true, force: true }); + }); + + describe("getServices", () => { + test("returns a clone of the config", () => { + const cfg = mgr.getServices(); + expect(cfg.services.echo).toBeDefined(); + // Mutating the returned object should not affect internal state + delete cfg.services.echo; + expect(mgr.getServices().services.echo).toBeDefined(); + }); + }); + + describe("getService", () => { + test("returns service config by name", () => { + const svc = mgr.getService("echo"); + expect(svc).not.toBeNull(); + expect(svc!.backend).toBe("stdio"); + }); + + test("returns null for unknown service", () => { + expect(mgr.getService("nonexistent")).toBeNull(); + }); + }); + + describe("serviceNames", () => { + test("lists configured service names", () => { + expect(mgr.serviceNames).toEqual(["echo"]); + }); + }); + + describe("addService", () => { + test("adds a valid service and persists to disk", async () => { + await mgr.addService("web", HTTP_SERVICE); + expect(mgr.getService("web")).not.toBeNull(); + + // 
Verify disk persistence + const onDisk = await Bun.file(configPath).json(); + expect(onDisk.services.web).toBeDefined(); + expect(onDisk.services.web.backend).toBe("http"); + }); + + test("throws on duplicate name", async () => { + expect(mgr.addService("echo", STDIO_SERVICE)).rejects.toThrow(ConfigManagerError); + }); + + test("throws on invalid config", async () => { + expect(mgr.addService("bad", { backend: "unknown" })).rejects.toThrow(ConfigManagerError); + }); + + test("throws on missing required fields", async () => { + expect(mgr.addService("bad", { backend: "stdio" })).rejects.toThrow(); + }); + }); + + describe("updateService", () => { + test("updates an existing service", async () => { + await mgr.updateService("echo", { ...STDIO_SERVICE, command: "/bin/cat" }); + const svc = mgr.getService("echo"); + expect(svc).not.toBeNull(); + expect((svc as typeof STDIO_SERVICE).command).toBe("/bin/cat"); + }); + + test("throws on unknown service", async () => { + expect(mgr.updateService("nope", STDIO_SERVICE)).rejects.toThrow(ConfigManagerError); + }); + }); + + describe("removeService", () => { + test("removes a service and persists", async () => { + // Add a second service first (config requires at least one) + await mgr.addService("web", HTTP_SERVICE); + await mgr.removeService("echo"); + expect(mgr.getService("echo")).toBeNull(); + expect(mgr.serviceNames).toEqual(["web"]); + + const onDisk = await Bun.file(configPath).json(); + expect(onDisk.services.echo).toBeUndefined(); + }); + + test("throws on unknown service", async () => { + expect(mgr.removeService("nope")).rejects.toThrow(ConfigManagerError); + }); + }); + + describe("reloadFromDisk", () => { + test("picks up disk changes", async () => { + // Write a modified config directly to disk + const newConfig = makeConfig({ + echo: STDIO_SERVICE, + web: HTTP_SERVICE, + }); + await Bun.write(configPath, JSON.stringify(newConfig, null, 2)); + + const diff = await mgr.reloadFromDisk(); + 
expect(diff.added).toEqual(["web"]); + expect(diff.removed).toEqual([]); + expect(mgr.serviceNames).toContain("web"); + }); + + test("detects removed services", async () => { + // Add web first, then reload with just echo + await mgr.addService("web", HTTP_SERVICE); + const echoOnly = makeConfig({ echo: STDIO_SERVICE }); + await Bun.write(configPath, JSON.stringify(echoOnly, null, 2)); + + const diff = await mgr.reloadFromDisk(); + expect(diff.removed).toEqual(["web"]); + }); + + test("throws on missing file", async () => { + await rm(configPath); + expect(mgr.reloadFromDisk()).rejects.toThrow(ConfigManagerError); + }); + }); + + describe("buildGitHubRawUrl", () => { + test("builds from owner/repo", () => { + const url = ConfigManager.buildGitHubRawUrl("user/repo"); + expect(url).toBe("https://raw.githubusercontent.com/user/repo/main/services.json"); + }); + + test("builds with custom branch and path", () => { + const url = ConfigManager.buildGitHubRawUrl("user/repo", "develop", "config/services.json"); + expect(url).toBe("https://raw.githubusercontent.com/user/repo/develop/config/services.json"); + }); + + test("strips full GitHub URL prefix", () => { + const url = ConfigManager.buildGitHubRawUrl("https://github.com/user/repo.git"); + expect(url).toBe("https://raw.githubusercontent.com/user/repo/main/services.json"); + }); + }); +}); diff --git a/tests/daemon/observability.test.ts b/tests/daemon/observability.test.ts index 33cb216..7e503be 100644 --- a/tests/daemon/observability.test.ts +++ b/tests/daemon/observability.test.ts @@ -59,6 +59,7 @@ const { ConnectionPool } = await import("../../src/daemon/pool.ts"); const { createDaemonServer } = await import("../../src/daemon/server.ts"); const { IdleTimer } = await import("../../src/daemon/idle.ts"); const { MetricsCollector } = await import("../../src/daemon/metrics.ts"); +const { TokenAuthProvider } = await import("../../src/daemon/auth-provider.ts"); const testConfig = { services: { @@ -113,7 +114,7 @@ 
describe("Daemon Observability", () => { config: testConfig, idleTimer, onShutdown: () => {}, - authToken: undefined, + authProvider: new TokenAuthProvider([]), metrics: new MetricsCollector(), }); servers.push(server); diff --git a/tests/daemon/timeout.test.ts b/tests/daemon/timeout.test.ts index 6ca29f5..b0a5f84 100644 --- a/tests/daemon/timeout.test.ts +++ b/tests/daemon/timeout.test.ts @@ -34,6 +34,8 @@ function runCli( MCP2CLI_SOCKET_PATH: join(tempDir, "daemon.sock"), MCP2CLI_IDLE_TIMEOUT: "10", MCP2CLI_CACHE_DIR: join(tempDir, "schemas"), + MCP2CLI_TOKENS_FILE: join(tempDir, "nonexistent-tokens.json"), + MCP2CLI_AUTH_TOKEN: "", ...extraEnv, }; diff --git a/tests/integration/daemon.test.ts b/tests/integration/daemon.test.ts index 77f84df..eb576af 100644 --- a/tests/integration/daemon.test.ts +++ b/tests/integration/daemon.test.ts @@ -35,6 +35,8 @@ function runCli( MCP2CLI_PID_FILE: join(tempDir, "daemon.pid"), MCP2CLI_SOCKET_PATH: join(tempDir, "daemon.sock"), MCP2CLI_IDLE_TIMEOUT: "2", + MCP2CLI_TOKENS_FILE: join(tempDir, "nonexistent-tokens.json"), + MCP2CLI_AUTH_TOKEN: "", ...extraEnv, }; From 13421097a8561b19de2efd3e26a10d79528fbe44 Mon Sep 17 00:00:00 2001 From: Rodaddy Date: Tue, 10 Mar 2026 00:30:56 -0400 Subject: [PATCH 3/3] fix: web UI login overlay race condition and CSS specificity bug - CSS: .hidden { display: none !important } to override .login-overlay flex - Race condition: authLock prevents 401 re-showing overlay during login - Debounce: require 2 consecutive auth failures before showing login - await refresh() before starting poll interval - Error handling: try/catch on doLogin() and init() Co-Authored-By: Claude Opus 4.6 --- src/daemon/ui.ts | 117 ++++++++++++++++++++++++++++++----------------- 1 file changed, 74 insertions(+), 43 deletions(-) diff --git a/src/daemon/ui.ts b/src/daemon/ui.ts index 20ac5ee..1ffc9c8 100644 --- a/src/daemon/ui.ts +++ b/src/daemon/ui.ts @@ -85,7 +85,7 @@ export function renderUI(): string { .toast.show { 
opacity: 1; } .toast-success { background: #238636; color: #fff; } .toast-error { background: #da3633; color: #fff; } - .hidden { display: none; } + .hidden { display: none !important; } .toolbar { display: flex; gap: 0.5rem; margin-bottom: 1rem; } .login-overlay { position: fixed; inset: 0; background: rgba(13,17,23,0.95); @@ -269,6 +269,8 @@ export function renderUI(): string { let editMode = null; // null = add, string = editing service name let pollTimer = null; let currentRole = 'admin'; // updated after auth +let authLock = false; // prevents api() 401 from re-showing login during transitions +let authFailCount = 0; // consecutive auth failures before showing login // --- Auth token management --- function getToken() { return sessionStorage.getItem('mcp2cli_token'); } @@ -277,7 +279,9 @@ function clearToken() { sessionStorage.removeItem('mcp2cli_token'); } function isAdmin() { return currentRole === 'admin'; } function showLogin(msg) { + if (authLock) return; // login in progress -- don't re-show overlay if (pollTimer) { clearInterval(pollTimer); pollTimer = null; } + authFailCount = 0; document.getElementById('loginOverlay').classList.remove('hidden'); document.getElementById('loginError').textContent = msg || ''; document.getElementById('tokenInput').value = ''; @@ -314,6 +318,7 @@ async function doLoginBasic() { return; } try { + authLock = true; // prevent api() 401 from re-showing login const res = await fetch('/api/auth/login', { method: 'POST', headers: { 'Content-Type': 'application/json' }, @@ -321,6 +326,7 @@ async function doLoginBasic() { }); const data = await res.json(); if (!data.success) { + authLock = false; document.getElementById('loginError').textContent = data.error || 'Invalid credentials'; return; } @@ -330,9 +336,12 @@ async function doLoginBasic() { document.getElementById('logoutBtn').classList.remove('hidden'); document.getElementById('authStatus').textContent = data.userId + ' (' + currentRole + ')'; applyRoleUI(); - refresh(); + 
await refresh(); + authLock = false; + authFailCount = 0; pollTimer = setInterval(refresh, 5000); } catch (e) { + authLock = false; document.getElementById('loginError').textContent = 'Connection error'; } } @@ -340,24 +349,33 @@ async function doLoginBasic() { async function doLogin() { const token = document.getElementById('tokenInput').value.trim(); if (!token) { document.getElementById('loginError').textContent = 'Token required'; return; } - // Test the token against /api/services - const res = await fetch('/api/services', { - headers: { 'Authorization': 'Bearer ' + token }, - }); - if (res.status === 401) { - document.getElementById('loginError').textContent = 'Invalid token'; - return; + try { + authLock = true; + // Test the token against /api/services + const res = await fetch('/api/services', { + headers: { 'Authorization': 'Bearer ' + token }, + }); + if (res.status === 401) { + authLock = false; + document.getElementById('loginError').textContent = 'Invalid token'; + return; + } + setToken(token); + // Fetch role info + const me = await fetch('/api/auth/me', { headers: { 'Authorization': 'Bearer ' + token } }).then(r => r.json()); + currentRole = me.role || 'viewer'; + document.getElementById('loginOverlay').classList.add('hidden'); + document.getElementById('logoutBtn').classList.remove('hidden'); + document.getElementById('authStatus').textContent = me.userId + ' (' + currentRole + ')'; + applyRoleUI(); + await refresh(); + authLock = false; + authFailCount = 0; + pollTimer = setInterval(refresh, 5000); + } catch (e) { + authLock = false; + document.getElementById('loginError').textContent = 'Connection error'; } - setToken(token); - // Fetch role info - const me = await fetch('/api/auth/me', { headers: { 'Authorization': 'Bearer ' + token } }).then(r => r.json()); - currentRole = me.role || 'viewer'; - document.getElementById('loginOverlay').classList.add('hidden'); - document.getElementById('logoutBtn').classList.remove('hidden'); - 
document.getElementById('authStatus').textContent = me.userId + ' (' + currentRole + ')'; - applyRoleUI(); - refresh(); - pollTimer = setInterval(refresh, 5000); } function doLogout() { @@ -382,7 +400,16 @@ async function api(method, path, body) { const opts = { method, headers }; if (body) opts.body = JSON.stringify(body); const res = await fetch('/api' + path, opts); - if (res.status === 401) { showLogin('Session expired -- please sign in again'); throw new Error('Unauthorized'); } + if (res.status === 401) { + authFailCount++; + // Require 2 consecutive failures before showing login (guards against transient errors) + if (authFailCount >= 2) { + clearToken(); + showLogin('Session expired -- please sign in again'); + } + throw new Error('Unauthorized'); + } + authFailCount = 0; // reset on any successful authed request return res.json(); } @@ -591,34 +618,38 @@ function esc(s) { const d = document.createElement('div'); d.textContent = s; re // Initial load: check if auth is required (async function init() { - // Probe /api/services without token to see if auth is enforced - const probe = await fetch('/api/services'); - if (probe.status === 401) { - // Auth required -- check sessionStorage for saved token - const saved = getToken(); - if (saved) { - const recheck = await fetch('/api/services', { - headers: { 'Authorization': 'Bearer ' + saved }, - }); - if (recheck.status === 401) { - clearToken(); + try { + // Probe /api/services without token to see if auth is enforced + const probe = await fetch('/api/services'); + if (probe.status === 401) { + // Auth required -- check sessionStorage for saved token + const saved = getToken(); + if (saved) { + const recheck = await fetch('/api/services', { + headers: { 'Authorization': 'Bearer ' + saved }, + }); + if (recheck.status === 401) { + clearToken(); + showLogin(''); + return; + } + // Token still valid -- fetch role + const me = await fetch('/api/auth/me', { headers: { 'Authorization': 'Bearer ' + saved } }).then(r 
=> r.json()); + currentRole = me.role || 'viewer'; + document.getElementById('logoutBtn').classList.remove('hidden'); + document.getElementById('authStatus').textContent = me.userId + ' (' + currentRole + ')'; + applyRoleUI(); + } else { showLogin(''); return; } - // Token still valid -- fetch role - const me = await fetch('/api/auth/me', { headers: { 'Authorization': 'Bearer ' + saved } }).then(r => r.json()); - currentRole = me.role || 'viewer'; - document.getElementById('logoutBtn').classList.remove('hidden'); - document.getElementById('authStatus').textContent = me.userId + ' (' + currentRole + ')'; - applyRoleUI(); - } else { - showLogin(''); - return; } + // No auth or valid token -- go + refresh(); + pollTimer = setInterval(refresh, 5000); + } catch (e) { + showLogin('Connection error -- please try again'); } - // No auth or valid token -- go - refresh(); - pollTimer = setInterval(refresh, 5000); })();