From 4250a66eb1ccd6faec79213c9bf1b0f5dfaa58b4 Mon Sep 17 00:00:00 2001 From: Rodaddy Date: Mon, 9 Mar 2026 18:44:08 -0400 Subject: [PATCH 1/6] feat: v1.3 advanced features -- schema caching, access control, gateway resilience, output formats, skill auto-regen Phase 15 - Schema Intelligence (ADV-01, ADV-02): - Schema caching with 24hr TTL at ~/.cache/mcp2cli/schemas/ - SHA-256 drift detection with per-tool change alerts - Cache clear/status commands, --fresh bypass flag Phase 16 - Access Control & Discovery (ADV-05, INFRA-03): - allowTools/blockTools glob patterns in services.json - Cross-service tool search via `mcp2cli grep` - TOOL_BLOCKED errors for policy-restricted tools Phase 17 - Gateway Resilience (INFRA-01, INFRA-02): - HTTP gateway with stdio fallback config - Circuit breaker (5 failures, 60s cooldown, disk-persisted) Phase 18 - Output Formats (ADV-03, ADV-04): - --format table/yaml/csv/ndjson alongside default json - Errors always JSON regardless of format flag Phase 19 - Skill Auto-Regeneration (ADV-06): - Drift-triggered skill file regeneration - Manual section preservation via MANUAL:START/END markers - --diff preview flag, access control integration Security hardening from agent swarm review: - Path traversal protection on cache/circuit-breaker file paths - Input validation on schema command service/tool names - ReDoS mitigation in glob patterns (collapse wildcards, length guard, cache) - Circuit breaker state enum validation - YAML key escaping for injection prevention - Exhaustiveness guard on format switch 678 tests, 0 failures, 0 TypeScript errors across 79 source files. Co-Authored-By: Claude Opus 4.6 --- README.md | 120 ++++++ src/access/filter.ts | 106 ++++++ src/access/index.ts | 11 + src/access/types.ts | 20 + src/cache/drift.ts | 149 ++++++++ src/cache/hash.ts | 49 +++ src/cache/index.ts | 26 ++ src/cache/storage.ts | 209 +++++++++++ src/cache/types.ts | 53 +++ src/cli/commands/cache.ts | 78 ++++ src/cli/commands/generate-skills.ts | 82 +++- src/cli/commands/grep.ts | 63 ++++ src/cli/commands/schema.ts | 111 +++++- src/cli/commands/service-help.ts | 9 +- src/cli/commands/tool-call.ts | 27 +- src/cli/help.ts | 20 + src/cli/index.ts | 4 + src/config/index.ts | 2 + src/config/schema.ts | 26 ++ src/daemon/drift-hook.ts | 108 ++++++ src/daemon/pool.ts | 89 ++++- src/format/csv.ts | 119 ++++++ src/format/index.ts | 44 +++ src/format/ndjson.ts | 24 ++ src/format/table.ts | 182 +++++++++ src/format/types.ts | 23 ++ src/format/yaml.ts | 184 +++++++++ src/generation/auto-regen.ts | 133 +++++++ src/generation/diff.ts | 187 ++++++++++ src/generation/index.ts | 20 + src/generation/preserve.ts | 191 ++++++++++ src/generation/templates.ts | 12 + src/invocation/index.ts | 1 + src/invocation/parse.ts | 46 ++- src/invocation/types.ts | 2 + src/resilience/circuit-breaker.ts | 260 +++++++++++++ src/resilience/index.ts | 19 + src/resilience/types.ts | 38 ++ src/types/index.ts | 1 + tests/access/filter.test.ts | 216 +++++++++++ tests/cache/drift.test.ts | 215 +++++++++++ tests/cache/hash.test.ts | 144 +++++++ tests/cache/storage.test.ts | 219 +++++++++++ tests/cli/grep.test.ts | 147 ++++++++ tests/format/csv.test.ts | 110 ++++++ tests/format/index.test.ts | 66 ++++ tests/format/ndjson.test.ts | 69 ++++ tests/format/table.test.ts | 121 ++++++ tests/format/yaml.test.ts | 104 ++++++ tests/generation/auto-regen.test.ts | 163 ++++++++ tests/generation/diff.test.ts | 231 ++++++++++++ tests/generation/preserve.test.ts | 217 +++++++++++ tests/invocation/parse.test.ts | 81 ++++ 
tests/invocation/validate.test.ts | 8 + tests/resilience/circuit-breaker.test.ts | 457 +++++++++++++++++++++++ tests/resilience/pool-fallback.test.ts | 283 ++++++++++++++ 56 files changed, 5676 insertions(+), 23 deletions(-) create mode 100644 src/access/filter.ts create mode 100644 src/access/index.ts create mode 100644 src/access/types.ts create mode 100644 src/cache/drift.ts create mode 100644 src/cache/hash.ts create mode 100644 src/cache/index.ts create mode 100644 src/cache/storage.ts create mode 100644 src/cache/types.ts create mode 100644 src/cli/commands/cache.ts create mode 100644 src/cli/commands/grep.ts create mode 100644 src/daemon/drift-hook.ts create mode 100644 src/format/csv.ts create mode 100644 src/format/index.ts create mode 100644 src/format/ndjson.ts create mode 100644 src/format/table.ts create mode 100644 src/format/types.ts create mode 100644 src/format/yaml.ts create mode 100644 src/generation/auto-regen.ts create mode 100644 src/generation/diff.ts create mode 100644 src/generation/preserve.ts create mode 100644 src/resilience/circuit-breaker.ts create mode 100644 src/resilience/index.ts create mode 100644 src/resilience/types.ts create mode 100644 tests/access/filter.test.ts create mode 100644 tests/cache/drift.test.ts create mode 100644 tests/cache/hash.test.ts create mode 100644 tests/cache/storage.test.ts create mode 100644 tests/cli/grep.test.ts create mode 100644 tests/format/csv.test.ts create mode 100644 tests/format/index.test.ts create mode 100644 tests/format/ndjson.test.ts create mode 100644 tests/format/table.test.ts create mode 100644 tests/format/yaml.test.ts create mode 100644 tests/generation/auto-regen.test.ts create mode 100644 tests/generation/diff.test.ts create mode 100644 tests/generation/preserve.test.ts create mode 100644 tests/resilience/circuit-breaker.test.ts create mode 100644 tests/resilience/pool-fallback.test.ts diff --git a/README.md b/README.md index e842cfd..d2b8ac9 100644 --- a/README.md +++ b/README.md @@ -249,6 +249,126 @@ mcp2cli n8n n8n_get_workflow --params '{"id": "abc123"}' This pattern keeps MCP tool definitions out of the agent's system prompt entirely. The agent only pays context cost when it actually needs to call a tool, and even then only for the specific tool's schema -- not all tools from all servers. +## v1.3 Advanced Features + +### Schema Caching + +Schemas are cached locally to avoid re-fetching on every invocation. Cached schemas live at `~/.cache/mcp2cli/schemas/` with a 24-hour TTL. Cache drift is detected via SHA-256 hashing -- if the upstream schema changes, the cache is automatically invalidated. + +```bash +# Check cache status (age, TTL, drift) +mcp2cli cache status + +# Clear all cached schemas +mcp2cli cache clear + +# Clear cache for a specific service +mcp2cli cache clear n8n + +# Bypass cache for a single schema lookup +mcp2cli schema n8n.n8n_list_workflows --fresh +``` + +Override the cache directory with `MCP2CLI_CACHE_DIR`. + +### Access Control + +Restrict which tools are exposed per service using `allowTools` and `blockTools` in `services.json`. Both accept glob patterns. + +```json +{ + "services": { + "n8n": { + "backend": "stdio", + "command": "npx", + "args": ["-y", "@anthropic/n8n-mcp"], + "allowTools": ["n8n_list_*", "n8n_get_*"], + "blockTools": ["n8n_delete_*"] + } + } +} +``` + +When both are present, `allowTools` is evaluated first (whitelist), then `blockTools` removes matches from the allowed set. 
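+
+For example, calling a tool that matches a `blockTools` pattern fails fast with a `TOOL_BLOCKED` error before any MCP connection is made (the exact JSON shape below is illustrative):
+
+```bash
+mcp2cli n8n n8n_delete_workflow --params '{"id": "abc123"}'
+# {"error":true,"code":"TOOL_BLOCKED","message":"Tool 'n8n_delete_workflow' is blocked by access policy for service 'n8n'"}
+```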
+
+#### Cross-Service Tool Search
+
+Search for tools across all services using cached schemas:
+
+```bash
+# Find all tools matching a pattern
+mcp2cli grep "workflow"
+
+# Matching is a case-insensitive substring search over names and descriptions
+mcp2cli grep "delete"
+```
+
+This searches cached schemas only -- no MCP connections are made.
+
+### Gateway Resilience
+
+HTTP/SSE services can define a `fallback` stdio config. If the remote gateway is unreachable, mcp2cli transparently falls back to a local MCP server process.
+
+```json
+{
+  "services": {
+    "n8n": {
+      "backend": "http",
+      "url": "http://mcp-gateway:3000/n8n",
+      "fallback": {
+        "command": "npx",
+        "args": ["-y", "@anthropic/n8n-mcp"]
+      }
+    }
+  }
+}
+```
+
+A circuit breaker protects against repeated failures: after 5 consecutive failures the circuit opens and routes directly to fallback for 60 seconds before re-probing the primary. Circuit state is persisted to `~/.cache/mcp2cli/circuit-breaker/` so it survives process restarts.
+
+### Output Formats
+
+Control output format with the `--format` flag:
+
+```bash
+mcp2cli n8n n8n_list_workflows --params '{}' --format table
+mcp2cli n8n n8n_list_workflows --params '{}' --format yaml
+mcp2cli n8n n8n_list_workflows --params '{}' --format csv
+mcp2cli n8n n8n_list_workflows --params '{}' --format ndjson
+```
+
+| Format | Description |
+|--------|-------------|
+| `json` | Default. Structured JSON (unchanged from v1.0) |
+| `table` | Aligned columns -- human-readable terminal output |
+| `yaml` | YAML output |
+| `csv` | RFC 4180 CSV -- pipe to spreadsheets or `csvtool` |
+| `ndjson` | One JSON object per line -- for streaming pipelines |
+
+Error responses are always JSON regardless of the `--format` flag.
+
+### Skill Auto-Regeneration
+
+Generated skill files can be previewed and kept in sync with upstream schema changes:
+
+```bash
+# Preview what would change without writing
+mcp2cli generate-skills --diff n8n
+
+# Regenerate (preserves manual sections)
+mcp2cli generate-skills n8n
+```
+
+Manual edits inside `MANUAL:START` / `MANUAL:END` markers are preserved across regeneration. When schema drift is detected (via the caching layer), skill regeneration can be triggered automatically.
+
+## v1.3 Environment Variables
+
+In addition to the variables listed above, v1.3 adds:
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `MCP2CLI_CACHE_DIR` | `~/.cache/mcp2cli` | Base directory for schema cache and circuit breaker state |
+
 ## Development
 
 ```bash
diff --git a/src/access/filter.ts b/src/access/filter.ts
new file mode 100644
index 0000000..5c35233
--- /dev/null
+++ b/src/access/filter.ts
@@ -0,0 +1,106 @@
+/**
+ * Tool access control filtering.
+ * Applies allow/block glob patterns to filter tool lists.
+ * Uses simple glob-to-regex conversion -- no external dependencies.
+ */
+import type { AccessPolicy, AccessCheckResult } from "./types.ts";
+
+const regexCache = new Map<string, RegExp>();
+
+/**
+ * Convert a simple glob pattern to a RegExp.
+ * Supports: * (match any chars), ? (match single char).
+ * All other regex special chars are escaped.
+ * Collapses consecutive wildcards to mitigate ReDoS.
+ * Caches compiled regexes to avoid recompilation.
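+ * Example: globToRegex("n8n_list_*") yields /^n8n_list_.*$/, which matches "n8n_list_workflows".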
+ */
+export function globToRegex(pattern: string): RegExp {
+  const cached = regexCache.get(pattern);
+  if (cached) return cached;
+
+  if (pattern.length > 200) {
+    throw new Error(`Glob pattern too long (${pattern.length} chars, max 200)`);
+  }
+
+  const escaped = pattern.replace(/[.+^${}()|[\]\\]/g, "\\$&");
+  const regexStr = escaped.replace(/\*+/g, ".*").replace(/\?/g, ".");
+  const regex = new RegExp(`^${regexStr}$`);
+  regexCache.set(pattern, regex);
+  return regex;
+}
+
+/**
+ * Check whether a tool name matches any pattern in a list.
+ */
+function matchesAny(toolName: string, patterns: string[]): boolean {
+  return patterns.some((pattern) => globToRegex(pattern).test(toolName));
+}
+
+/**
+ * Check a single tool name against an access policy.
+ * Returns whether the tool is allowed and, if blocked, the reason.
+ *
+ * Logic:
+ * 1. If allowTools is set and tool doesn't match any allow pattern -> blocked (not in allowlist)
+ * 2. If blockTools is set and tool matches any block pattern -> blocked (in blocklist)
+ * 3. Otherwise -> allowed
+ */
+export function checkToolAccess(
+  toolName: string,
+  policy: AccessPolicy,
+): AccessCheckResult {
+  // Check allowlist first (if configured)
+  if (policy.allowTools && policy.allowTools.length > 0) {
+    if (!matchesAny(toolName, policy.allowTools)) {
+      return {
+        allowed: false,
+        reason: `Tool "${toolName}" is not in the allowTools list for this service`,
+      };
+    }
+  }
+
+  // Check blocklist
+  if (policy.blockTools && policy.blockTools.length > 0) {
+    if (matchesAny(toolName, policy.blockTools)) {
+      return {
+        allowed: false,
+        reason: `Tool "${toolName}" is blocked by policy (matches blockTools pattern)`,
+      };
+    }
+  }
+
+  return { allowed: true };
+}
+
+/**
+ * Filter a list of tool objects by access policy.
+ * Returns only tools that pass the allow/block checks.
+ * Works with any object that has a `name` property.
+ */
+export function filterTools<T extends { name: string }>(
+  tools: T[],
+  policy: AccessPolicy,
+): T[] {
+  // Fast path: no policy configured
+  if (
+    (!policy.allowTools || policy.allowTools.length === 0) &&
+    (!policy.blockTools || policy.blockTools.length === 0)
+  ) {
+    return tools;
+  }
+
+  return tools.filter((tool) => checkToolAccess(tool.name, policy).allowed);
+}
+
+/**
+ * Extract an AccessPolicy from a service config object.
+ * Works with both StdioService and HttpService configs.
+ */
+export function extractPolicy(
+  serviceConfig: { allowTools?: string[]; blockTools?: string[] },
+): AccessPolicy {
+  return {
+    allowTools: serviceConfig.allowTools,
+    blockTools: serviceConfig.blockTools,
+  };
+}
diff --git a/src/access/index.ts b/src/access/index.ts
new file mode 100644
index 0000000..2cf10ae
--- /dev/null
+++ b/src/access/index.ts
@@ -0,0 +1,11 @@
+/**
+ * Access control module -- barrel export.
+ * Tool allow/block lists with glob-based pattern matching.
+ */
+export type { AccessPolicy, AccessCheckResult } from "./types.ts";
+export {
+  globToRegex,
+  checkToolAccess,
+  filterTools,
+  extractPolicy,
+} from "./filter.ts";
diff --git a/src/access/types.ts b/src/access/types.ts
new file mode 100644
index 0000000..1cc24b6
--- /dev/null
+++ b/src/access/types.ts
@@ -0,0 +1,20 @@
+/**
+ * Access control types for tool allow/block lists.
+ * Used by filter.ts to apply glob-based access policies to tool lists.
+ */
+
+/** Access policy for a service -- extracted from service config */
+export interface AccessPolicy {
+  /** Glob patterns for allowed tools (whitelist). If set, only matching tools pass.
*/ + allowTools?: string[]; + /** Glob patterns for blocked tools (blacklist). Applied after allowTools. */ + blockTools?: string[]; +} + +/** Result of checking a single tool against an access policy */ +export interface AccessCheckResult { + /** Whether the tool is allowed */ + allowed: boolean; + /** Reason for blocking, if blocked */ + reason?: string; +} diff --git a/src/cache/drift.ts b/src/cache/drift.ts new file mode 100644 index 0000000..de48917 --- /dev/null +++ b/src/cache/drift.ts @@ -0,0 +1,149 @@ +/** + * Schema drift detection. + * Compares cached tool hashes against live tool hashes to identify + * added, removed, or changed tools per service. + */ +import type { CachedToolSchema, DriftResult, ToolDrift } from "./types.ts"; +import { createLogger } from "../logger/index.ts"; + +const log = createLogger("drift"); + +/** + * Compare cached and live tool schemas to detect drift. + * Uses pre-computed SHA-256 hashes for efficient comparison. + * + * @param service - Service name for reporting + * @param cached - Previously cached tool schemas with hashes + * @param live - Current live tool schemas with hashes + * @param cachedAt - ISO timestamp of the cached version + * @returns DriftResult describing all changes + */ +export function detectDrift( + service: string, + cached: CachedToolSchema[], + live: CachedToolSchema[], + cachedAt: string, +): DriftResult { + const changes: ToolDrift[] = []; + + // Build lookup maps by tool name + const cachedMap = new Map(cached.map((t) => [t.name, t])); + const liveMap = new Map(live.map((t) => [t.name, t])); + + // Check for removed tools (in cached but not in live) + for (const [name] of cachedMap) { + if (!liveMap.has(name)) { + changes.push({ tool: name, type: "removed" }); + } + } + + // Check for added tools (in live but not in cached) + for (const [name] of liveMap) { + if (!cachedMap.has(name)) { + changes.push({ tool: name, type: "added" }); + } + } + + // Check for changed tools (hash mismatch) + for (const [name, liveSchema] of liveMap) { + const cachedSchema = cachedMap.get(name); + if (cachedSchema && cachedSchema.hash !== liveSchema.hash) { + const details = describeChange(cachedSchema, liveSchema); + changes.push({ tool: name, type: "changed", details }); + } + } + + // Sort changes by tool name for deterministic output + changes.sort((a, b) => a.tool.localeCompare(b.tool)); + + const result: DriftResult = { + service, + hasDrift: changes.length > 0, + changes, + cachedAt, + detectedAt: new Date().toISOString(), + }; + + if (result.hasDrift) { + logDrift(result); + } + + return result; +} + +/** + * Describe what changed between two versions of a tool schema. + * Compares inputSchema properties and required fields. 
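+ * Example output (hypothetical "tags" param): "description changed; params added: tags; newly required: tags".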
+ */ +function describeChange( + cached: CachedToolSchema, + live: CachedToolSchema, +): string { + const parts: string[] = []; + + // Check description change + if (cached.description !== live.description) { + parts.push("description changed"); + } + + // Compare inputSchema properties + const cachedProps = getSchemaProperties(cached.inputSchema); + const liveProps = getSchemaProperties(live.inputSchema); + + const addedProps = liveProps.filter((p) => !cachedProps.includes(p)); + const removedProps = cachedProps.filter((p) => !liveProps.includes(p)); + + if (addedProps.length > 0) { + parts.push(`params added: ${addedProps.join(", ")}`); + } + if (removedProps.length > 0) { + parts.push(`params removed: ${removedProps.join(", ")}`); + } + + // Compare required fields + const cachedRequired = getSchemaRequired(cached.inputSchema); + const liveRequired = getSchemaRequired(live.inputSchema); + + const newRequired = liveRequired.filter((r) => !cachedRequired.includes(r)); + const removedRequired = cachedRequired.filter((r) => !liveRequired.includes(r)); + + if (newRequired.length > 0) { + parts.push(`newly required: ${newRequired.join(", ")}`); + } + if (removedRequired.length > 0) { + parts.push(`no longer required: ${removedRequired.join(", ")}`); + } + + if (parts.length === 0) { + parts.push("schema structure changed"); + } + + return parts.join("; "); +} + +/** Extract property names from a JSON Schema object */ +function getSchemaProperties(schema: object): string[] { + const s = schema as { properties?: Record }; + return s.properties ? Object.keys(s.properties).sort() : []; +} + +/** Extract required field names from a JSON Schema object */ +function getSchemaRequired(schema: object): string[] { + const s = schema as { required?: string[] }; + return s.required ? [...s.required].sort() : []; +} + +/** Log drift detection results via structured logger */ +function logDrift(result: DriftResult): void { + const summary = result.changes.map((c) => { + const detail = c.details ? ` (${c.details})` : ""; + return `${c.type}: ${c.tool}${detail}`; + }); + + log.warn("schema_drift_detected", { + service: result.service, + changeCount: result.changes.length, + cachedAt: result.cachedAt, + changes: summary, + }); +} diff --git a/src/cache/hash.ts b/src/cache/hash.ts new file mode 100644 index 0000000..7f87385 --- /dev/null +++ b/src/cache/hash.ts @@ -0,0 +1,49 @@ +/** + * Schema surface hashing. + * Produces deterministic SHA-256 hashes from tool schemas using canonical JSON. + */ + +/** + * Produce canonical JSON: sorted keys recursively, no whitespace. + * Deterministic serialization ensures identical schemas always hash the same. + */ +export function canonicalJson(value: unknown): string { + return JSON.stringify(value, (_key, val) => { + if (val !== null && typeof val === "object" && !Array.isArray(val)) { + const sorted: Record = {}; + for (const k of Object.keys(val as Record).sort()) { + sorted[k] = (val as Record)[k]; + } + return sorted; + } + return val; + }); +} + +/** + * Compute SHA-256 hash of a tool's schema surface. + * The "surface" is: name + description + inputSchema + annotations. + * All serialized as canonical JSON before hashing. + */ +export async function hashToolSchema(tool: { + name: string; + description?: string; + inputSchema: object; + annotations?: object; +}): Promise { + const surface = canonicalJson({ + name: tool.name, + description: tool.description ?? "", + inputSchema: tool.inputSchema, + annotations: tool.annotations ?? 
null, + }); + + const encoder = new TextEncoder(); + const data = encoder.encode(surface); + const hashBuffer = await crypto.subtle.digest("SHA-256", data); + const hashArray = new Uint8Array(hashBuffer); + + return Array.from(hashArray) + .map((b) => b.toString(16).padStart(2, "0")) + .join(""); +} diff --git a/src/cache/index.ts b/src/cache/index.ts new file mode 100644 index 0000000..fefa3d5 --- /dev/null +++ b/src/cache/index.ts @@ -0,0 +1,26 @@ +/** + * Cache module -- barrel export. + * Schema caching with TTL, drift detection, and file-based persistence. + */ +export type { + CacheEntry, + CacheMetadata, + CachedToolSchema, + ToolDrift, + DriftResult, +} from "./types.ts"; + +export { canonicalJson, hashToolSchema } from "./hash.ts"; + +export { + getCacheDir, + getCacheFilePath, + readCache, + readCacheRaw, + writeCache, + clearCache, + listCachedServices, + isCacheExpired, +} from "./storage.ts"; + +export { detectDrift } from "./drift.ts"; diff --git a/src/cache/storage.ts b/src/cache/storage.ts new file mode 100644 index 0000000..7063c66 --- /dev/null +++ b/src/cache/storage.ts @@ -0,0 +1,209 @@ +/** + * File-based cache storage for schema data. + * Reads/writes cache files at ~/.cache/mcp2cli/schemas/{service}.json. + * Supports TTL checks, atomic writes, and selective/full cache clearing. + */ +import { mkdir, unlink, readdir, rename } from "node:fs/promises"; +import { dirname, join } from "node:path"; +import { createLogger } from "../logger/index.ts"; +import type { CacheEntry, CacheMetadata, CachedToolSchema } from "./types.ts"; + +const log = createLogger("cache"); + +/** Validate service name to prevent path traversal attacks. */ +function validateServiceName(service: string): string { + if (service.includes('/') || service.includes('\\') || service.includes('..')) { + throw new Error(`Invalid service name: "${service}"`); + } + return service; +} + +/** Default TTL: 24 hours in milliseconds */ +const DEFAULT_TTL_MS = 24 * 60 * 60 * 1000; + +/** + * Resolve the cache directory path. + * MCP2CLI_CACHE_DIR env var overrides the default location. + */ +export function getCacheDir(): string { + if (process.env.MCP2CLI_CACHE_DIR) { + return process.env.MCP2CLI_CACHE_DIR; + } + const home = process.env.HOME; + if (!home) { + throw new Error("Cannot determine cache path: HOME environment variable is not set"); + } + return join(home, ".cache", "mcp2cli", "schemas"); +} + +/** Get the cache file path for a given service */ +export function getCacheFilePath(service: string): string { + validateServiceName(service); + return join(getCacheDir(), `${service}.json`); +} + +/** + * Read cached schema for a service. + * Returns null if cache doesn't exist, is corrupted, or has expired. 
+ */ +export async function readCache(service: string): Promise { + const filePath = getCacheFilePath(service); + const file = Bun.file(filePath); + + if (!(await file.exists())) { + log.debug("cache_miss", { service, reason: "not_found" }); + return null; + } + + let entry: CacheEntry; + try { + entry = (await file.json()) as CacheEntry; + } catch { + log.warn("cache_corrupted", { service, path: filePath }); + return null; + } + + // Validate structure + if (!entry.metadata || !Array.isArray(entry.tools)) { + log.warn("cache_invalid_structure", { service }); + return null; + } + + // Check TTL + if (isCacheExpired(entry.metadata)) { + log.debug("cache_expired", { + service, + cachedAt: entry.metadata.cachedAt, + ttlMs: entry.metadata.ttlMs, + }); + return null; + } + + log.debug("cache_hit", { service, toolCount: entry.tools.length }); + return entry; +} + +/** Check whether a cache entry has exceeded its TTL */ +export function isCacheExpired(metadata: CacheMetadata): boolean { + const cachedTime = new Date(metadata.cachedAt).getTime(); + const ttl = metadata.ttlMs || DEFAULT_TTL_MS; + return Date.now() - cachedTime > ttl; +} + +/** + * Write cache entry for a service. + * Uses atomic write (write to temp, then rename) to prevent corruption + * from concurrent access or crashes. + */ +export async function writeCache( + service: string, + tools: CachedToolSchema[], + ttlMs: number = DEFAULT_TTL_MS, +): Promise { + const filePath = getCacheFilePath(service); + const dir = dirname(filePath); + + // Ensure cache directory exists + await mkdir(dir, { recursive: true }); + + const entry: CacheEntry = { + metadata: { + service, + cachedAt: new Date().toISOString(), + ttlMs, + toolCount: tools.length, + }, + tools, + }; + + // Atomic write: write to temp file, then rename + const tempPath = `${filePath}.tmp.${process.pid}`; + try { + await Bun.write(tempPath, JSON.stringify(entry, null, 2)); + // Rename is atomic on POSIX filesystems + await rename(tempPath, filePath); + log.debug("cache_written", { service, toolCount: tools.length }); + } catch (err) { + // Clean up temp file on failure + await unlink(tempPath).catch(() => {}); + throw err; + } +} + +/** + * Clear cached schemas. + * If service is provided, clears only that service's cache. + * If no service, clears all cached schemas. + */ +export async function clearCache(service?: string): Promise { + const cacheDir = getCacheDir(); + + if (service) { + // Clear single service cache + const filePath = getCacheFilePath(service); + const file = Bun.file(filePath); + if (await file.exists()) { + await unlink(filePath); + log.info("cache_cleared", { service }); + return 1; + } + return 0; + } + + // Clear all caches + let cleared = 0; + try { + const entries = await readdir(cacheDir); + for (const entry of entries) { + if (entry.endsWith(".json")) { + await unlink(join(cacheDir, entry)); + cleared++; + } + } + } catch { + // Directory doesn't exist -- nothing to clear + return 0; + } + + log.info("cache_cleared_all", { count: cleared }); + return cleared; +} + +/** + * List all cached services. + * Returns service names that have cache files. + */ +export async function listCachedServices(): Promise { + const cacheDir = getCacheDir(); + try { + const entries = await readdir(cacheDir); + return entries + .filter((e) => e.endsWith(".json")) + .map((e) => e.replace(/\.json$/, "")); + } catch { + return []; + } +} + +/** + * Read cache without TTL check -- used by drift detection + * to compare even expired caches against live schemas. 
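+ * Also read by `cache status` and `grep`, which report on expired entries as well.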
+ */ +export async function readCacheRaw(service: string): Promise { + const filePath = getCacheFilePath(service); + const file = Bun.file(filePath); + + if (!(await file.exists())) { + return null; + } + + try { + const entry = (await file.json()) as CacheEntry; + if (!entry.metadata || !Array.isArray(entry.tools)) { + return null; + } + return entry; + } catch { + return null; + } +} diff --git a/src/cache/types.ts b/src/cache/types.ts new file mode 100644 index 0000000..b791591 --- /dev/null +++ b/src/cache/types.ts @@ -0,0 +1,53 @@ +/** + * Cache module types. + * Defines structures for schema caching with TTL and drift detection. + */ + +/** Cached schema for a single tool within a service */ +export interface CachedToolSchema { + name: string; + description: string; + inputSchema: object; + annotations?: object; + /** SHA-256 hash of canonical JSON (sorted keys, no whitespace) */ + hash: string; +} + +/** Metadata for a service's cached schema file */ +export interface CacheMetadata { + /** Service name this cache belongs to */ + service: string; + /** ISO timestamp when cache was written */ + cachedAt: string; + /** TTL in milliseconds (default 24h) */ + ttlMs: number; + /** Number of tools cached */ + toolCount: number; +} + +/** A complete cache entry -- metadata + tool schemas */ +export interface CacheEntry { + metadata: CacheMetadata; + tools: CachedToolSchema[]; +} + +/** Result of comparing a single tool's schema between cached and live */ +export interface ToolDrift { + tool: string; + type: "added" | "removed" | "changed"; + /** Present when type is "changed" -- describes what changed */ + details?: string; +} + +/** Result of drift detection for a service */ +export interface DriftResult { + service: string; + /** Whether any drift was detected */ + hasDrift: boolean; + /** Individual tool changes */ + changes: ToolDrift[]; + /** ISO timestamp of cached version */ + cachedAt: string; + /** ISO timestamp of detection */ + detectedAt: string; +} diff --git a/src/cli/commands/cache.ts b/src/cli/commands/cache.ts new file mode 100644 index 0000000..b9747ff --- /dev/null +++ b/src/cli/commands/cache.ts @@ -0,0 +1,78 @@ +/** + * Handle `mcp2cli cache ` -- manage schema cache. + * Supports: clear [service], status + */ +import { clearCache, listCachedServices, readCacheRaw } from "../../cache/index.ts"; +import { EXIT_CODES } from "../../types/index.ts"; +import type { CommandHandler } from "../../types/index.ts"; + +export const handleCache: CommandHandler = async (args: string[]) => { + const subcommand = args[0]; + + switch (subcommand) { + case "clear": + await handleCacheClear(args.slice(1)); + break; + case "status": + await handleCacheStatus(); + break; + default: + console.log( + [ + "Usage: mcp2cli cache ", + "", + "SUBCOMMANDS:", + " clear [service] Clear cached schemas (all or specific service)", + " status Show cache status for all services", + ].join("\n"), + ); + process.exitCode = subcommand ? EXIT_CODES.VALIDATION : EXIT_CODES.SUCCESS; + break; + } +}; + +async function handleCacheClear(args: string[]): Promise { + const service = args[0]; + const cleared = await clearCache(service); + + if (service) { + if (cleared > 0) { + console.log(`Cleared cache for service: ${service}`); + } else { + console.log(`No cache found for service: ${service}`); + } + } else { + console.log(`Cleared ${cleared} cached schema${cleared === 1 ? 
"" : "s"}`); + } + + process.exitCode = EXIT_CODES.SUCCESS; +} + +async function handleCacheStatus(): Promise { + const services = await listCachedServices(); + + if (services.length === 0) { + console.log("No cached schemas found."); + process.exitCode = EXIT_CODES.SUCCESS; + return; + } + + const lines: string[] = ["Cached schemas:", ""]; + + for (const service of services.sort()) { + const entry = await readCacheRaw(service); + if (entry) { + const age = Date.now() - new Date(entry.metadata.cachedAt).getTime(); + const ageHours = Math.round(age / (1000 * 60 * 60) * 10) / 10; + const ttlHours = Math.round(entry.metadata.ttlMs / (1000 * 60 * 60) * 10) / 10; + const expired = age > entry.metadata.ttlMs; + const status = expired ? " (expired)" : ""; + lines.push( + ` ${service}: ${entry.metadata.toolCount} tools, ${ageHours}h old (TTL: ${ttlHours}h)${status}`, + ); + } + } + + console.log(lines.join("\n")); + process.exitCode = EXIT_CODES.SUCCESS; +} diff --git a/src/cli/commands/generate-skills.ts b/src/cli/commands/generate-skills.ts index 95d7126..a705bbd 100644 --- a/src/cli/commands/generate-skills.ts +++ b/src/cli/commands/generate-skills.ts @@ -12,10 +12,18 @@ import { resolveOutputDir, planFileWrites, executeFileWrites, + extractManualSections, + injectManualSections, + parseExistingTools, + computeSkillDiff, + formatDiffPreview, } from "../../generation/index.ts"; +import { filterTools, extractPolicy } from "../../access/filter.ts"; import { EXIT_CODES } from "../../types/index.ts"; import type { ConflictMode, SkillTemplateInput } from "../../generation/types.ts"; import type { SchemaOutput } from "../../schema/types.ts"; +import type { ToolSummary } from "../../schema/types.ts"; +import { join } from "node:path"; /** * Extract trigger keywords from tool descriptions. @@ -84,13 +92,36 @@ function parseOutputFlag(args: string[]): string | undefined { return outputArg.split("=").slice(1).join("="); // rejoin in case path has = } +/** + * Read existing SKILL.md content from the output directory (if it exists). + * Returns null if no file exists. + */ +async function readExistingSkillFile( + outputDir: string, +): Promise { + const skillPath = join(outputDir, "SKILL.md"); + const file = Bun.file(skillPath); + if (await file.exists()) { + return file.text(); + } + return null; +} + /** * Generate skill files from MCP service schemas. * - * Usage: mcp2cli generate-skills [--dry-run] [--conflict=skip|force|merge] [--output=] + * Usage: mcp2cli generate-skills [--dry-run] [--diff] [--conflict=skip|force|merge] [--output=] + * + * Flags: + * --diff Preview what would change without writing files + * --dry-run Output plan without writing files + * --conflict How to handle existing files: skip|force|merge (default: skip) + * --output Output directory path * * Connects to the MCP server, introspects all tools, groups them by noun prefix, * generates a slim SKILL.md and per-group reference files. + * Applies access control (allow/block lists) before generating. + * Preserves manual sections (MANUAL:START/END) across regeneration. * * Pre-connection errors use printError + exitCode + return (never throw). * Post-connection errors propagate to main().catch(). 
@@ -110,6 +141,7 @@ export const handleGenerateSkills = async (args: string[]): Promise => { // Parse flags const dryRun = args.includes("--dry-run"); + const diffMode = args.includes("--diff"); let conflictMode = parseConflictMode(args); const outputFlag = parseOutputFlag(args); @@ -158,7 +190,7 @@ export const handleGenerateSkills = async (args: string[]): Promise => { try { // List all tools - const tools = await listToolsForService(connection.client); + let tools: ToolSummary[] = await listToolsForService(connection.client); if (tools.length === 0) { printError({ @@ -170,7 +202,21 @@ export const handleGenerateSkills = async (args: string[]): Promise => { return; } - // Get full schemas for each tool + // Apply access control -- filter tools by allow/block lists + const policy = extractPolicy(service); + tools = filterTools(tools, policy); + + if (tools.length === 0) { + printError({ + error: true, + code: "INPUT_VALIDATION_ERROR", + message: "All tools are blocked by access policy. No skills to generate.", + }); + process.exitCode = EXIT_CODES.VALIDATION; + return; + } + + // Get full schemas for each tool (already filtered by access control) const schemas: SchemaOutput[] = []; for (const tool of tools) { const schema = await getToolSchema(connection.client, tool.name, serviceName); @@ -193,8 +239,33 @@ export const handleGenerateSkills = async (args: string[]): Promise => { triggerKeywords, }; + // Resolve output directory + const outputDir = resolveOutputDir(serviceName, outputFlag); + + // --diff mode: preview changes without writing + if (diffMode) { + const existingContent = await readExistingSkillFile(outputDir); + const existingTools = existingContent + ? parseExistingTools(existingContent) + : []; + const diff = computeSkillDiff(serviceName, existingTools, tools); + const preview = formatDiffPreview(diff); + console.log(preview); + process.exitCode = EXIT_CODES.SUCCESS; + return; + } + // Generate SKILL.md - const skillMd = generateSkillMd(input); + let skillMd = generateSkillMd(input); + + // Preserve manual sections from existing SKILL.md + const existingContent = await readExistingSkillFile(outputDir); + if (existingContent) { + const manualSections = extractManualSections(existingContent); + if (manualSections.length > 0) { + skillMd = injectManualSections(skillMd, manualSections); + } + } // Token budget check const tokenCount = estimateTokens(skillMd); @@ -214,9 +285,6 @@ export const handleGenerateSkills = async (args: string[]): Promise => { referenceFiles.push(refPath); } - // Resolve output directory - const outputDir = resolveOutputDir(serviceName, outputFlag); - // Dry-run: output plan without writing files if (dryRun) { console.log(JSON.stringify({ diff --git a/src/cli/commands/grep.ts b/src/cli/commands/grep.ts new file mode 100644 index 0000000..dfffe70 --- /dev/null +++ b/src/cli/commands/grep.ts @@ -0,0 +1,63 @@ +/** + * Handle `mcp2cli grep "pattern"` -- search tool names and descriptions across cached services. + * Cache-only operation -- never connects to MCP servers. 
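+ * Matches print one per line as "service.tool -- description".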
+ */ +import { listCachedServices, readCacheRaw } from "../../cache/index.ts"; +import { EXIT_CODES } from "../../types/index.ts"; +import type { CommandHandler } from "../../types/index.ts"; + +export const handleGrep: CommandHandler = async (args: string[]) => { + const pattern = args[0]; + + if (!pattern) { + console.log( + [ + "Usage: mcp2cli grep ", + "", + "Search tool names and descriptions across all cached services.", + "Pattern matching is case-insensitive substring match.", + "", + "EXAMPLES:", + ' mcp2cli grep "workflow"', + ' mcp2cli grep "list"', + ].join("\n"), + ); + process.exitCode = EXIT_CODES.SUCCESS; + return; + } + + const services = await listCachedServices(); + + if (services.length === 0) { + console.log( + "No cached schemas found. Run 'mcp2cli --help' or 'mcp2cli schema .' to populate the cache.", + ); + process.exitCode = EXIT_CODES.SUCCESS; + return; + } + + const lowerPattern = pattern.toLowerCase(); + const matches: string[] = []; + + for (const service of services.sort()) { + const entry = await readCacheRaw(service); + if (!entry) continue; + + for (const tool of entry.tools) { + const nameMatch = tool.name.toLowerCase().includes(lowerPattern); + const descMatch = tool.description.toLowerCase().includes(lowerPattern); + + if (nameMatch || descMatch) { + matches.push(`${service}.${tool.name} -- ${tool.description}`); + } + } + } + + if (matches.length === 0) { + console.log(`No tools matching "${pattern}" found in cached schemas.`); + } else { + console.log(matches.join("\n")); + } + + process.exitCode = EXIT_CODES.SUCCESS; +}; diff --git a/src/cli/commands/schema.ts b/src/cli/commands/schema.ts index ac6296e..0215305 100644 --- a/src/cli/commands/schema.ts +++ b/src/cli/commands/schema.ts @@ -1,6 +1,8 @@ /** * Handle `mcp2cli schema .` -- get full input schema for a tool. * Routes through daemon by default. Set MCP2CLI_NO_DAEMON=1 for direct connection. + * ADV-01: Checks cache first, falls back to live fetch, caches result. + * Supports --fresh flag to bypass cache for one call. */ import { loadConfig } from "../../config/index.ts"; import { connectToService, connectToHttpService } from "../../connection/index.ts"; @@ -11,12 +13,19 @@ import { formatSchemaOutput, } from "../../schema/index.ts"; import type { SchemaOutput } from "../../schema/index.ts"; +import { readCache, writeCache, hashToolSchema } from "../../cache/index.ts"; +import type { CachedToolSchema } from "../../cache/index.ts"; +import { checkToolAccess, extractPolicy } from "../../access/index.ts"; +import { validateIdentifier } from "../../validation/pipelines.ts"; import { printError } from "../errors.ts"; import { EXIT_CODES } from "../../types/index.ts"; import type { CommandHandler } from "../../types/index.ts"; export const handleSchema: CommandHandler = async (args: string[]) => { - const target = args[0]; + // Extract --fresh flag before parsing positional args + const fresh = args.includes("--fresh"); + const positionalArgs = args.filter((a) => a !== "--fresh"); + const target = positionalArgs[0]; // No argument or empty string if (!target) { @@ -24,7 +33,7 @@ export const handleSchema: CommandHandler = async (args: string[]) => { error: true, code: "UNKNOWN_COMMAND", message: - "Usage: mcp2cli schema .", + "Usage: mcp2cli schema . 
[--fresh]", }); process.exitCode = EXIT_CODES.VALIDATION; return; @@ -44,6 +53,20 @@ export const handleSchema: CommandHandler = async (args: string[]) => { const { service: serviceName, tool: toolName } = parsed.value; + // Validate identifiers before any config/cache lookup + const svcCheck = validateIdentifier(serviceName, "service"); + if (!svcCheck.valid) { + printError({ error: true, code: "INPUT_VALIDATION_ERROR", message: svcCheck.message }); + process.exitCode = EXIT_CODES.VALIDATION; + return; + } + const toolCheck = validateIdentifier(toolName, "tool"); + if (!toolCheck.valid) { + printError({ error: true, code: "INPUT_VALIDATION_ERROR", message: toolCheck.message }); + process.exitCode = EXIT_CODES.VALIDATION; + return; + } + // Load config and resolve service const config = await loadConfig(); const service = config.services[serviceName]; @@ -58,6 +81,40 @@ export const handleSchema: CommandHandler = async (args: string[]) => { return; } + // Access control: check if tool is blocked by policy + const policy = extractPolicy(service); + const accessResult = checkToolAccess(toolName, policy); + if (!accessResult.allowed) { + printError({ + error: true, + code: "TOOL_BLOCKED", + message: `Tool '${toolName}' is blocked by access policy for service '${serviceName}'`, + }); + process.exitCode = EXIT_CODES.VALIDATION; + return; + } + + // ADV-01: Check cache first (unless --fresh) + if (!fresh) { + const cached = await readCache(serviceName); + if (cached) { + const cachedTool = cached.tools.find((t) => t.name === toolName); + if (cachedTool) { + const output: SchemaOutput = { + tool: cachedTool.name, + description: cachedTool.description, + inputSchema: cachedTool.inputSchema, + annotations: cachedTool.annotations, + usage: `mcp2cli ${serviceName} ${cachedTool.name}`, + }; + console.log(formatSchemaOutput(output)); + process.exitCode = EXIT_CODES.SUCCESS; + return; + } + // Tool not in cache -- fall through to live fetch + } + } + const daemonEnabled = !process.env.MCP2CLI_NO_DAEMON; if (daemonEnabled) { @@ -65,7 +122,10 @@ export const handleSchema: CommandHandler = async (args: string[]) => { const result = await getSchemaViaDaemon({ service: serviceName, tool: toolName }); if (result.success) { - console.log(formatSchemaOutput(result.result as SchemaOutput)); + const schemaOutput = result.result as SchemaOutput; + console.log(formatSchemaOutput(schemaOutput)); + // Cache the result from daemon + await cacheSchemaResult(serviceName, schemaOutput); process.exitCode = EXIT_CODES.SUCCESS; } else { printError({ @@ -98,9 +158,54 @@ export const handleSchema: CommandHandler = async (args: string[]) => { process.exitCode = EXIT_CODES.VALIDATION; } else { console.log(formatSchemaOutput(result)); + // Cache the result from direct connection + await cacheSchemaResult(serviceName, result); process.exitCode = EXIT_CODES.SUCCESS; } } finally { await connection.close(); } }; + +/** + * Cache a single tool schema result. + * Merges into existing cache for the service (upsert by tool name). + */ +async function cacheSchemaResult( + serviceName: string, + schema: SchemaOutput, +): Promise { + try { + // Read existing cache to merge + const { readCacheRaw } = await import("../../cache/index.ts"); + const existing = await readCacheRaw(serviceName); + const existingTools = existing?.tools ?? 
[]; + + const hash = await hashToolSchema({ + name: schema.tool, + description: schema.description, + inputSchema: schema.inputSchema, + annotations: schema.annotations, + }); + + const newTool: CachedToolSchema = { + name: schema.tool, + description: schema.description, + inputSchema: schema.inputSchema, + annotations: schema.annotations, + hash, + }; + + // Replace existing entry or append + const toolIndex = existingTools.findIndex((t) => t.name === schema.tool); + if (toolIndex >= 0) { + existingTools[toolIndex] = newTool; + } else { + existingTools.push(newTool); + } + + await writeCache(serviceName, existingTools); + } catch { + // Cache write failure is non-fatal -- log and continue + } +} diff --git a/src/cli/commands/service-help.ts b/src/cli/commands/service-help.ts index a1190a3..ffc089e 100644 --- a/src/cli/commands/service-help.ts +++ b/src/cli/commands/service-help.ts @@ -7,6 +7,7 @@ import { connectToService, connectToHttpService } from "../../connection/index.t import { listToolsViaDaemon } from "../../process/index.ts"; import { listToolsForService, formatToolListing } from "../../schema/index.ts"; import type { ToolListing, ToolSummary } from "../../schema/index.ts"; +import { filterTools, extractPolicy } from "../../access/index.ts"; import { isAiMode } from "../help.ts"; import { printError } from "../errors.ts"; import { EXIT_CODES } from "../../types/index.ts"; @@ -36,7 +37,9 @@ export async function handleServiceHelp( const result = await listToolsViaDaemon({ service: serviceName }); if (result.success) { - const tools = result.result as ToolSummary[]; + const allTools = result.result as ToolSummary[]; + const policy = extractPolicy(service); + const tools = filterTools(allTools, policy); const listing: ToolListing = { service: serviceName, description: service.description ?? "(no description)", @@ -67,7 +70,9 @@ export async function handleServiceHelp( : await connectToService(service); try { - const tools = await listToolsForService(connection.client); + const allTools = await listToolsForService(connection.client); + const policy = extractPolicy(service); + const tools = filterTools(allTools, policy); const listing: ToolListing = { service: serviceName, diff --git a/src/cli/commands/tool-call.ts b/src/cli/commands/tool-call.ts index eed6cf8..eb592e0 100644 --- a/src/cli/commands/tool-call.ts +++ b/src/cli/commands/tool-call.ts @@ -10,10 +10,12 @@ import { loadConfig } from "../../config/index.ts"; import { connectToService, connectToHttpService } from "../../connection/index.ts"; import { callViaDaemon, getSchemaViaDaemon } from "../../process/index.ts"; import { getToolSchema } from "../../schema/introspect.ts"; +import { checkToolAccess, extractPolicy } from "../../access/index.ts"; import { printError } from "../errors.ts"; import { EXIT_CODES } from "../../types/index.ts"; import type { ErrorCode } from "../../types/index.ts"; import type { SchemaOutput } from "../../schema/types.ts"; +import { formatOutput } from "../../format/index.ts"; /** * Map daemon error codes to semantic exit codes. 
@@ -73,6 +75,19 @@ export async function handleToolCall(args: string[]): Promise { return; } + // Access control: check if tool is blocked by policy + const policy = extractPolicy(service); + const accessResult = checkToolAccess(parsed.value.toolName, policy); + if (!accessResult.allowed) { + printError({ + error: true, + code: "TOOL_BLOCKED", + message: `Tool '${parsed.value.toolName}' is blocked by access policy for service '${parsed.value.serviceName}'`, + }); + process.exitCode = EXIT_CODES.VALIDATION; + return; + } + const daemonEnabled = !process.env.MCP2CLI_NO_DAEMON; if (daemonEnabled) { @@ -119,15 +134,15 @@ export async function handleToolCall(args: string[]): Promise { if (result.success) { // Field masking on successful daemon response + let outputData = result.result; if (parsed.value.fields.length > 0) { const { masked, missing } = applyFieldMask(result.result, parsed.value.fields); for (const field of missing) { process.stderr.write(`warning: field "${field}" not found in response\n`); } - console.log(JSON.stringify({ success: true, result: masked })); - } else { - console.log(JSON.stringify(result)); + outputData = masked; } + console.log(formatOutput(outputData, parsed.value.format)); process.exitCode = EXIT_CODES.SUCCESS; } else { printError({ @@ -187,15 +202,15 @@ export async function handleToolCall(args: string[]): Promise { const output = formatToolResult(result as Parameters[0]); // 8. Field masking on successful response + let outputData = output.result; if (parsed.value.fields.length > 0) { const { masked, missing } = applyFieldMask(output.result, parsed.value.fields); for (const field of missing) { process.stderr.write(`warning: field "${field}" not found in response\n`); } - console.log(JSON.stringify({ success: true, result: masked })); - } else { - console.log(JSON.stringify(output)); + outputData = masked; } + console.log(formatOutput(outputData, parsed.value.format)); process.exitCode = EXIT_CODES.SUCCESS; } finally { await connection.close(); diff --git a/src/cli/help.ts b/src/cli/help.ts index 745fd73..b3b00c6 100644 --- a/src/cli/help.ts +++ b/src/cli/help.ts @@ -36,6 +36,12 @@ export function printHelp(args?: string[]): void { description: "List available tools for a service", usage: "mcp2cli --help", }, + { + name: " --format", + description: + "Output in a specific format: json (default), table, yaml, csv, ndjson", + usage: "mcp2cli --format table", + }, { name: "schema", description: "Get parameter schema for a service tool", @@ -51,6 +57,16 @@ export function printHelp(args?: string[]): void { description: "Generate skill files from service schemas", usage: "mcp2cli generate-skills ", }, + { + name: "cache", + description: "Manage schema cache (clear, status)", + usage: "mcp2cli cache ", + }, + { + name: "grep", + description: "Search tool names and descriptions across cached services", + usage: 'mcp2cli grep "pattern"', + }, ], examples: [ "mcp2cli services", @@ -75,6 +91,8 @@ export function printHelp(args?: string[]): void { " schema Get parameter schema for a service tool", " bootstrap Auto-configure from claude.json MCP config", " generate-skills Generate skill files from service schemas", + " cache Manage schema cache (clear, status)", + ' grep Search tool names/descriptions across cached services', "", "EXAMPLES:", " mcp2cli services", @@ -86,6 +104,8 @@ export function printHelp(args?: string[]): void { " --help, -h Show this help message", " --version Show version number", " --help-format=ai Output help as JSON for AI agents", + " --fresh 
Bypass schema cache for one call", + " --format Output format: json (default), table, yaml, csv, ndjson", ]; console.log(lines.join("\n")); } diff --git a/src/cli/index.ts b/src/cli/index.ts index 54006fd..f258b4d 100644 --- a/src/cli/index.ts +++ b/src/cli/index.ts @@ -9,6 +9,8 @@ import { handleToolCall } from "./commands/tool-call.ts"; import { handleDaemonStop, handleDaemonStatus } from "./commands/daemon.ts"; import { handleBootstrap } from "./commands/bootstrap.ts"; import { handleGenerateSkills } from "./commands/generate-skills.ts"; +import { handleCache } from "./commands/cache.ts"; +import { handleGrep } from "./commands/grep.ts"; import { ConfigError } from "../config/index.ts"; import { ConnectionError } from "../connection/index.ts"; import { ToolError } from "../invocation/errors.ts"; @@ -40,6 +42,8 @@ const handleDaemonDispatch: CommandHandler = async (args: string[]) => { const COMMANDS: Record = { services: handleServices, schema: handleSchema, + cache: handleCache, + grep: handleGrep, daemon: handleDaemonDispatch, bootstrap: handleBootstrap, "generate-skills": handleGenerateSkills, diff --git a/src/config/index.ts b/src/config/index.ts index 89f3ea4..3c1345c 100644 --- a/src/config/index.ts +++ b/src/config/index.ts @@ -1,5 +1,6 @@ export { StdioServiceSchema, + StdioFallbackSchema, HttpServiceSchema, ServiceSchema, ServicesConfigSchema, @@ -7,6 +8,7 @@ export { export type { StdioService, + StdioFallback, HttpService, ServiceConfig, ServicesConfig, diff --git a/src/config/schema.ts b/src/config/schema.ts index 85ce691..79bdb96 100644 --- a/src/config/schema.ts +++ b/src/config/schema.ts @@ -1,5 +1,16 @@ import { z } from "zod"; +/** + * Tool access control fields shared by all service backends. + * allowTools: glob patterns for tools to include (whitelist). If set, only matching tools are visible. + * blockTools: glob patterns for tools to exclude (blacklist). Applied after allowTools. + * Both use simple glob syntax: * matches any chars, ? matches single char. + */ +const accessControlFields = { + allowTools: z.array(z.string()).optional(), + blockTools: z.array(z.string()).optional(), +}; + /** * Stdio-based MCP service configuration. * Launches a local process and communicates via stdin/stdout. @@ -10,17 +21,31 @@ export const StdioServiceSchema = z.object({ command: z.string().min(1), args: z.array(z.string()).optional().default([]), env: z.record(z.string(), z.string()).optional().default({}), + ...accessControlFields, +}); + +/** + * Stdio fallback configuration for HTTP services. + * When the HTTP gateway is unreachable, the CLI falls back to this local process. + */ +export const StdioFallbackSchema = z.object({ + command: z.string().min(1), + args: z.array(z.string()).optional().default([]), + env: z.record(z.string(), z.string()).optional().default({}), }); /** * HTTP-based MCP service configuration. * Connects to a remote MCP server over HTTP/SSE. + * Optional fallback launches a local stdio process when the gateway is unreachable. 
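+ * A full fallback example lives in the README under "Gateway Resilience".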
*/ export const HttpServiceSchema = z.object({ description: z.string().optional(), backend: z.literal("http"), url: z.string().url(), headers: z.record(z.string(), z.string()).optional().default({}), + fallback: StdioFallbackSchema.optional(), + ...accessControlFields, }); /** @@ -46,6 +71,7 @@ export const ServicesConfigSchema = z.object({ /** Inferred types from schemas */ export type StdioService = z.infer; +export type StdioFallback = z.infer; export type HttpService = z.infer; export type ServiceConfig = z.infer; export type ServicesConfig = z.infer; diff --git a/src/daemon/drift-hook.ts b/src/daemon/drift-hook.ts new file mode 100644 index 0000000..89c6d8d --- /dev/null +++ b/src/daemon/drift-hook.ts @@ -0,0 +1,108 @@ +/** + * Drift detection hook for the connection pool. + * ADV-02: On new connection, compares live tool schemas against cached versions + * and logs drift alerts identifying which tools changed and how. + * ADV-06: Triggers auto-regeneration of skill files when drift is detected. + */ +import type { McpConnection } from "../connection/types.ts"; +import { readCacheRaw, writeCache, hashToolSchema, detectDrift } from "../cache/index.ts"; +import type { CachedToolSchema } from "../cache/index.ts"; +import type { AccessPolicy } from "../access/types.ts"; +import { autoRegenerateSkills } from "../generation/auto-regen.ts"; +import { createLogger } from "../logger/index.ts"; + +const log = createLogger("drift-hook"); + +/** + * Check for schema drift on a new connection. + * Fetches live tool list, hashes each tool, compares against cached hashes. + * When drift is detected, triggers auto-regeneration of skill files. + * Non-blocking -- errors are logged but never propagated. + * + * @param serviceName - The service to check + * @param connection - Active MCP connection + * @param policy - Optional access policy for filtering tools during regeneration + */ +export async function checkDriftOnConnect( + serviceName: string, + connection: McpConnection, + policy?: AccessPolicy, +): Promise { + try { + // Fetch live tool list + const response = await connection.client.listTools(); + const liveTools = response.tools; + + // Hash all live tools + const liveSchemas: CachedToolSchema[] = await Promise.all( + liveTools.map(async (tool) => ({ + name: tool.name, + description: tool.description ?? "(no description)", + inputSchema: tool.inputSchema, + annotations: tool.annotations as object | undefined, + hash: await hashToolSchema({ + name: tool.name, + description: tool.description, + inputSchema: tool.inputSchema, + annotations: tool.annotations as object | undefined, + }), + })), + ); + + // Read existing cache (raw -- ignoring TTL for drift comparison) + const cached = await readCacheRaw(serviceName); + + if (cached && cached.tools.length > 0) { + // Compare cached vs live + const drift = detectDrift( + serviceName, + cached.tools, + liveSchemas, + cached.metadata.cachedAt, + ); + + if (drift.hasDrift) { + log.warn("drift_check_complete", { + service: serviceName, + driftDetected: true, + changeCount: drift.changes.length, + }); + + // ADV-06: Auto-regenerate skill files on drift + const toolSummaries = liveTools.map((t) => ({ + name: t.name, + description: t.description ?? "(no description)", + })); + const regenResult = await autoRegenerateSkills( + serviceName, + toolSummaries, + policy ?? 
{}, + ); + + if (regenResult.regenerated) { + log.info("skill_auto_regen_triggered", { + service: serviceName, + filesWritten: regenResult.filesWritten.length, + manualSectionsPreserved: regenResult.manualSectionsPreserved, + }); + } + } else { + log.debug("drift_check_complete", { + service: serviceName, + driftDetected: false, + }); + } + } else { + log.debug("drift_check_skipped", { + service: serviceName, + reason: "no_cached_schemas", + }); + } + + // Always update cache with fresh schemas after drift check + await writeCache(serviceName, liveSchemas); + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + log.warn("drift_check_failed", { service: serviceName, error: message }); + } +} diff --git a/src/daemon/pool.ts b/src/daemon/pool.ts index 6a91994..9450784 100644 --- a/src/daemon/pool.ts +++ b/src/daemon/pool.ts @@ -7,8 +7,15 @@ import { connectToService, connectToHttpService } from "../connection/index.ts"; import { ConnectionError } from "../connection/errors.ts"; import type { McpConnection } from "../connection/types.ts"; -import type { ServicesConfig } from "../config/index.ts"; +import type { ServicesConfig, HttpService } from "../config/index.ts"; import { createLogger } from "../logger/index.ts"; +import { checkDriftOnConnect } from "./drift-hook.ts"; +import { extractPolicy } from "../access/filter.ts"; +import { + shouldAttemptHttp, + recordFailure, + recordSuccess, +} from "../resilience/index.ts"; const log = createLogger("pool"); @@ -94,7 +101,7 @@ export class ConnectionPool { log.info("connecting", { service: serviceName }); let connectFn: () => Promise; if (serviceConfig.backend === "http") { - connectFn = () => connectToHttpService(serviceConfig); + connectFn = () => this.connectHttpWithFallback(serviceName, serviceConfig); } else if (serviceConfig.backend === "stdio") { connectFn = () => connectToService(serviceConfig); } else { @@ -112,6 +119,10 @@ export class ConnectionPool { }); this.pending.delete(serviceName); log.info("connected", { service: serviceName }); + // ADV-02: Fire-and-forget drift check on new connection + // ADV-06: Pass access policy for skill auto-regeneration filtering + const policy = extractPolicy(serviceConfig); + checkDriftOnConnect(serviceName, connection, policy).catch(() => {}); return connection; }, (err) => { @@ -154,6 +165,80 @@ export class ConnectionPool { return Array.from(this.connections.keys()); } + /** + * INFRA-01/02: Connect to an HTTP service with circuit breaker and stdio fallback. + * 1. If circuit is open, skip HTTP and go directly to fallback. + * 2. If circuit is closed/half-open, attempt HTTP connection. + * 3. On HTTP failure, record failure in circuit breaker. + * 4. If fallback is configured, fall back to stdio; otherwise re-throw. 
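+ * The breaker opens after 5 consecutive failures and re-probes the primary after a 60s cooldown.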
+   */
+  private async connectHttpWithFallback(
+    serviceName: string,
+    serviceConfig: HttpService,
+  ): Promise<McpConnection> {
+    const hasFallback = !!serviceConfig.fallback;
+    const attemptHttp = await shouldAttemptHttp(serviceName);
+
+    // Circuit is open -- skip HTTP entirely
+    if (!attemptHttp) {
+      if (hasFallback) {
+        log.warn("fallback_circuit_open", {
+          service: serviceName,
+          url: serviceConfig.url,
+        });
+        return this.connectFallback(serviceName, serviceConfig);
+      }
+      // No fallback configured -- report the open circuit as an error
+      throw new ConnectionError(
+        `Circuit breaker open for ${serviceName} and no fallback configured`,
+        `url: ${serviceConfig.url}`,
+      );
+    }
+
+    // Attempt HTTP connection
+    try {
+      const connection = await connectToHttpService(serviceConfig);
+      await recordSuccess(serviceName);
+      return connection;
+    } catch (err) {
+      await recordFailure(serviceName);
+      const message = err instanceof Error ? err.message : String(err);
+
+      if (hasFallback) {
+        log.warn("fallback_http_failed", {
+          service: serviceName,
+          url: serviceConfig.url,
+          error: message,
+        });
+        return this.connectFallback(serviceName, serviceConfig);
+      }
+
+      // No fallback -- propagate the original error
+      throw err;
+    }
+  }
+
+  /**
+   * Connect via the stdio fallback defined in an HTTP service config.
+   * Constructs a StdioService-compatible object from the fallback fields.
+   */
+  private async connectFallback(
+    serviceName: string,
+    serviceConfig: HttpService,
+  ): Promise<McpConnection> {
+    const fb = serviceConfig.fallback!;
+    log.warn("using_stdio_fallback", {
+      service: serviceName,
+      command: fb.command,
+    });
+    return connectToService({
+      backend: "stdio" as const,
+      command: fb.command,
+      args: fb.args,
+      env: fb.env,
+    });
+  }
+
   /**
    * MEM-05: Lightweight health check using listTools as a ping.
    * Returns false if the connection is dead or times out.
diff --git a/src/format/csv.ts b/src/format/csv.ts
new file mode 100644
index 0000000..c112ef1
--- /dev/null
+++ b/src/format/csv.ts
@@ -0,0 +1,119 @@
+/**
+ * CSV formatter -- RFC 4180 compliant.
+ * Handles proper quoting for values containing commas, double quotes, and newlines.
+ * No external dependencies.
+ */
+
+/**
+ * Characters that trigger quoting per RFC 4180.
+ */
+const NEEDS_QUOTING = /[",\n\r]/;
+
+/**
+ * Quote a CSV field value per RFC 4180:
+ * - Enclose in double quotes if field contains comma, quote, or newline
+ * - Double any embedded double quotes
+ */
+function quoteField(value: string): string {
+  if (NEEDS_QUOTING.test(value)) {
+    return `"${value.replace(/"/g, '""')}"`;
+  }
+  return value;
+}
+
+/**
+ * Convert a value to a CSV-safe string.
+ */
+function toCsvString(value: unknown): string {
+  if (value === null || value === undefined) {
+    return "";
+  }
+  if (typeof value === "object") {
+    return JSON.stringify(value);
+  }
+  return String(value);
+}
+
+/**
+ * Format data as CSV.
+ * - Array of objects: header row from keys + data rows
+ * - Single object: header row + single data row
+ * - Array of primitives: single VALUE column
+ * - Primitive: single value
+ */
+export function formatCsv(data: unknown): string {
+  if (data === null || data === undefined) {
+    return "";
+  }
+
+  // Single object -> one row
+  if (typeof data === "object" && !Array.isArray(data)) {
+    return formatObjectCsv(data as Record<string, unknown>);
+  }
+
+  // Array
+  if (Array.isArray(data)) {
+    if (data.length === 0) return "";
+
+    // Array of primitives -> single column
+    if (typeof data[0] !== "object" || data[0] === null) {
+      return formatPrimitiveCsv(data);
+    }
+
+    return formatArrayCsv(data as Record<string, unknown>[]);
+  }
+
+  // Primitive
+  return quoteField(String(data));
+}
+
+/**
+ * Format a single object as CSV with header + one data row.
+ */
+function formatObjectCsv(obj: Record<string, unknown>): string {
+  const keys = Object.keys(obj);
+  if (keys.length === 0) return "";
+
+  const header = keys.map(quoteField).join(",");
+  const row = keys.map((k) => quoteField(toCsvString(obj[k]))).join(",");
+  return `${header}\n${row}`;
+}
+
+/**
+ * Format an array of primitives as CSV with a VALUE header.
+ */
+function formatPrimitiveCsv(arr: unknown[]): string {
+  const lines = ["VALUE"];
+  for (const item of arr) {
+    lines.push(quoteField(toCsvString(item)));
+  }
+  return lines.join("\n");
+}
+
+/**
+ * Format an array of objects as CSV.
+ * Collects all unique keys across all objects for the header.
+ */
+function formatArrayCsv(rows: Record<string, unknown>[]): string {
+  // Collect all unique keys preserving insertion order
+  const keySet = new Set<string>();
+  for (const row of rows) {
+    for (const key of Object.keys(row)) {
+      keySet.add(key);
+    }
+  }
+  const columns = Array.from(keySet);
+
+  const lines: string[] = [];
+
+  // Header row
+  lines.push(columns.map(quoteField).join(","));
+
+  // Data rows
+  for (const row of rows) {
+    const values = columns.map((col) => quoteField(toCsvString(row[col])));
+    lines.push(values.join(","));
+  }
+
+  return lines.join("\n");
+}
diff --git a/src/format/index.ts b/src/format/index.ts
new file mode 100644
index 0000000..77a3aaf
--- /dev/null
+++ b/src/format/index.ts
@@ -0,0 +1,44 @@
+/**
+ * Output format module barrel export.
+ * Dispatches data formatting based on the requested output format.
+ */
+export { formatTable } from "./table.ts";
+export { formatYaml } from "./yaml.ts";
+export { formatCsv } from "./csv.ts";
+export { formatNdjson } from "./ndjson.ts";
+export { isValidFormat, VALID_FORMATS } from "./types.ts";
+export type { OutputFormat } from "./types.ts";
+
+import type { OutputFormat } from "./types.ts";
+import { formatTable } from "./table.ts";
+import { formatYaml } from "./yaml.ts";
+import { formatCsv } from "./csv.ts";
+import { formatNdjson } from "./ndjson.ts";
+
+/**
+ * Format data according to the specified output format.
+ *
+ * For "json" format, wraps in the standard { success: true, result: ... } envelope.
+ * For other formats, renders the raw result data directly.
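+ *
+ * A quick sketch of the dispatch (illustrative values):
+ * @example
+ * formatOutput({ id: 1 }, "json");  // '{"success":true,"result":{"id":1}}'
+ * formatOutput([{ id: 1 }], "csv"); // "id\n1"
+ * formatOutput([1, 2], "ndjson");   // "1\n2"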
+ */
+export function formatOutput(
+  data: unknown,
+  format: OutputFormat,
+): string {
+  switch (format) {
+    case "json":
+      return JSON.stringify({ success: true, result: data });
+    case "table":
+      return formatTable(data);
+    case "yaml":
+      return formatYaml(data);
+    case "csv":
+      return formatCsv(data);
+    case "ndjson":
+      return formatNdjson(data);
+    default: {
+      const _exhaustive: never = format;
+      throw new Error(`Unsupported format: ${_exhaustive}`);
+    }
+  }
+}
diff --git a/src/format/ndjson.ts b/src/format/ndjson.ts
new file mode 100644
index 0000000..050ba9b
--- /dev/null
+++ b/src/format/ndjson.ts
@@ -0,0 +1,24 @@
+/**
+ * NDJSON (Newline Delimited JSON) formatter.
+ * If result is an array, outputs one JSON object per line.
+ * If single object, outputs one line.
+ */
+
+/**
+ * Format data as NDJSON.
+ * - Array: one JSON line per element
+ * - Non-array: single JSON line
+ * - null/undefined: the string "null"
+ */
+export function formatNdjson(data: unknown): string {
+  if (data === null || data === undefined) {
+    return "null";
+  }
+
+  if (Array.isArray(data)) {
+    if (data.length === 0) return "";
+    return data.map((item) => JSON.stringify(item)).join("\n");
+  }
+
+  return JSON.stringify(data);
+}
diff --git a/src/format/table.ts b/src/format/table.ts
new file mode 100644
index 0000000..ba37ab0
--- /dev/null
+++ b/src/format/table.ts
@@ -0,0 +1,182 @@
+/**
+ * Table formatter: renders data as aligned columns with headers.
+ * Auto-detects column widths, right-aligns numbers, truncates long values.
+ */
+
+const MAX_COLUMN_WIDTH = 40;
+const TRUNCATION_SUFFIX = "...";
+
+/**
+ * Flatten a value to a row-friendly record.
+ * Nested objects become JSON strings.
+ */
+function flattenRow(row: Record<string, unknown>): Record<string, string> {
+  const flat: Record<string, string> = {};
+  for (const [key, value] of Object.entries(row)) {
+    if (value === null || value === undefined) {
+      flat[key] = "";
+    } else if (typeof value === "object") {
+      flat[key] = JSON.stringify(value);
+    } else {
+      flat[key] = String(value);
+    }
+  }
+  return flat;
+}
+
+/**
+ * Check if a string represents a numeric value.
+ */
+function isNumeric(value: string): boolean {
+  if (value === "") return false;
+  return !isNaN(Number(value)) && isFinite(Number(value));
+}
+
+/**
+ * Truncate a string to max length, appending "..." if truncated.
+ */
+function truncate(value: string, maxWidth: number): string {
+  if (value.length <= maxWidth) return value;
+  return value.slice(0, maxWidth - TRUNCATION_SUFFIX.length) + TRUNCATION_SUFFIX;
+}
+
+/**
+ * Format data as an aligned text table.
+ * - If data is an array of objects, each object is a row.
+ * - If data is a single object, renders as key-value pairs.
+ * - Numbers are right-aligned, strings left-aligned.
+ * - Long values are truncated to MAX_COLUMN_WIDTH.
+ */
+export function formatTable(data: unknown): string {
+  if (data === null || data === undefined) {
+    return "(empty)";
+  }
+
+  // Single object -> key/value table
+  if (typeof data === "object" && !Array.isArray(data)) {
+    return formatKeyValueTable(data as Record<string, unknown>);
+  }
+
+  // Array of objects -> columnar table
+  if (Array.isArray(data)) {
+    if (data.length === 0) return "(empty)";
+
+    // Array of primitives -> single column
+    if (typeof data[0] !== "object" || data[0] === null) {
+      return formatPrimitiveArray(data);
+    }
+
+    return formatColumnarTable(data as Record<string, unknown>[]);
+  }
+
+  // Primitive -> just stringify
+  return String(data);
+}
+
+/**
+ * Render a single object as a two-column key/value table.
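+ *
+ * Rough output shape, sketched with made-up data:
+ * @example
+ * // formatKeyValueTable({ id: 7, name: "build" }) renders roughly:
+ * // KEY  VALUE
+ * // ---- ----------------------------------------
+ * // id   7
+ * // name build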
+ */
+function formatKeyValueTable(obj: Record<string, unknown>): string {
+  const rows = flattenRow(obj);
+  const keys = Object.keys(rows);
+  if (keys.length === 0) return "(empty)";
+
+  const keyWidth = Math.min(
+    Math.max(...keys.map((k) => k.length)),
+    MAX_COLUMN_WIDTH,
+  );
+
+  const lines: string[] = [];
+  // Header
+  lines.push(
+    `${"KEY".padEnd(keyWidth)} VALUE`,
+  );
+  lines.push(`${"-".repeat(keyWidth)} ${"-".repeat(MAX_COLUMN_WIDTH)}`);
+
+  for (const [key, value] of Object.entries(rows)) {
+    const truncatedKey = truncate(key, keyWidth);
+    const truncatedValue = truncate(value, MAX_COLUMN_WIDTH);
+    lines.push(`${truncatedKey.padEnd(keyWidth)} ${truncatedValue}`);
+  }
+
+  return lines.join("\n");
+}
+
+/**
+ * Render an array of primitives as a single VALUE column.
+ */
+function formatPrimitiveArray(arr: unknown[]): string {
+  const lines: string[] = [];
+  lines.push("VALUE");
+  lines.push("-".repeat(MAX_COLUMN_WIDTH));
+  for (const item of arr) {
+    const value = item === null || item === undefined ? "" : String(item);
+    lines.push(truncate(value, MAX_COLUMN_WIDTH));
+  }
+  return lines.join("\n");
+}
+
+/**
+ * Render an array of objects as a columnar table with headers.
+ */
+function formatColumnarTable(rows: Record<string, unknown>[]): string {
+  // Collect all unique keys across all rows (preserving insertion order)
+  const keySet = new Set<string>();
+  for (const row of rows) {
+    for (const key of Object.keys(row)) {
+      keySet.add(key);
+    }
+  }
+  const columns = Array.from(keySet);
+
+  // Flatten all rows
+  const flatRows = rows.map(flattenRow);
+
+  // Calculate column widths (header length vs max data length, capped)
+  const widths: Record<string, number> = {};
+  for (const col of columns) {
+    const headerLen = col.length;
+    const maxDataLen = Math.max(
+      ...flatRows.map((row) => (row[col] ?? "").length),
+      0,
+    );
+    widths[col] = Math.min(Math.max(headerLen, maxDataLen), MAX_COLUMN_WIDTH);
+  }
+
+  // Detect which columns are numeric (all non-empty values are numbers)
+  const numericCols = new Set<string>();
+  for (const col of columns) {
+    const values = flatRows.map((row) => row[col] ?? "").filter((v) => v !== "");
+    if (values.length > 0 && values.every(isNumeric)) {
+      numericCols.add(col);
+    }
+  }
+
+  const lines: string[] = [];
+
+  // Header row
+  const headerParts = columns.map((col) => {
+    const width = widths[col] as number;
+    return col.toUpperCase().padEnd(width);
+  });
+  lines.push(headerParts.join(" "));
+
+  // Separator
+  const sepParts = columns.map((col) => "-".repeat(widths[col] as number));
+  lines.push(sepParts.join(" "));
+
+  // Data rows
+  for (const row of flatRows) {
+    const parts = columns.map((col) => {
+      const width = widths[col] as number;
+      const value = truncate(row[col] ?? "", width);
+      if (numericCols.has(col)) {
+        return value.padStart(width);
+      }
+      return value.padEnd(width);
+    });
+    lines.push(parts.join(" "));
+  }
+
+  return lines.join("\n");
+}
diff --git a/src/format/types.ts b/src/format/types.ts
new file mode 100644
index 0000000..04deaaf
--- /dev/null
+++ b/src/format/types.ts
@@ -0,0 +1,23 @@
+/**
+ * Supported output format types for CLI results.
+ * "json" is the default; others are opt-in via --format flag.
+ */
+export type OutputFormat = "json" | "table" | "yaml" | "csv" | "ndjson";
+
+/**
+ * All valid format values, used for validation.
+ */
+export const VALID_FORMATS: ReadonlySet<string> = new Set([
+  "json",
+  "table",
+  "yaml",
+  "csv",
+  "ndjson",
+]);
+
+/**
+ * Check if a string is a valid OutputFormat.
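+ *
+ * @example
+ * isValidFormat("yaml"); // true -- narrows the string to OutputFormat
+ * isValidFormat("xml");  // false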
+ */
+export function isValidFormat(value: string): value is OutputFormat {
+  return VALID_FORMATS.has(value);
+}
diff --git a/src/format/yaml.ts b/src/format/yaml.ts
new file mode 100644
index 0000000..0992e43
--- /dev/null
+++ b/src/format/yaml.ts
@@ -0,0 +1,184 @@
+/**
+ * Simple YAML formatter -- no external dependencies.
+ * Handles flat/nested objects, arrays, and primitive values.
+ * Produces valid YAML output with proper string quoting.
+ */
+
+/**
+ * Characters that require quoting in YAML string values.
+ */
+const YAML_SPECIAL_CHARS = /[:#\[\]{}&*!|>'"%@`,?\\]/;
+const YAML_BOOL_PATTERN = /^(true|false|yes|no|on|off|null)$/i;
+const YAML_NUMBER_PATTERN = /^[-+]?(\d+\.?\d*|\.\d+)([eE][-+]?\d+)?$/;
+
+/**
+ * Check if a string value needs quoting in YAML.
+ */
+function needsQuoting(value: string): boolean {
+  if (value === "") return true;
+  if (value.startsWith(" ") || value.endsWith(" ")) return true;
+  if (YAML_SPECIAL_CHARS.test(value)) return true;
+  if (YAML_BOOL_PATTERN.test(value)) return true;
+  if (YAML_NUMBER_PATTERN.test(value)) return true;
+  if (value.includes("\n")) return true;
+  return false;
+}
+
+/**
+ * Quote a YAML string value using double quotes with escaping.
+ */
+function quoteString(value: string): string {
+  const escaped = value
+    .replace(/\\/g, "\\\\")
+    .replace(/"/g, '\\"')
+    .replace(/\n/g, "\\n")
+    .replace(/\t/g, "\\t")
+    .replace(/\r/g, "\\r");
+  return `"${escaped}"`;
+}
+
+/**
+ * Format a key for YAML output, quoting if necessary.
+ */
+function formatKey(key: string): string {
+  if (needsQuoting(key)) {
+    return quoteString(key);
+  }
+  return key;
+}
+
+/**
+ * Format a value as a YAML string.
+ */
+function formatValue(value: string): string {
+  if (needsQuoting(value)) {
+    return quoteString(value);
+  }
+  return value;
+}
+
+/**
+ * Serialize data to YAML format.
+ */
+export function formatYaml(data: unknown): string {
+  return serializeYaml(data, 0).trimEnd();
+}
+
+/**
+ * Recursively serialize a value to YAML with proper indentation.
+ */
+function serializeYaml(data: unknown, indent: number): string {
+  if (data === null || data === undefined) {
+    return "null\n";
+  }
+
+  if (typeof data === "boolean") {
+    return `${data}\n`;
+  }
+
+  if (typeof data === "number") {
+    return `${data}\n`;
+  }
+
+  if (typeof data === "string") {
+    return `${formatValue(data)}\n`;
+  }
+
+  if (Array.isArray(data)) {
+    return serializeArray(data, indent);
+  }
+
+  if (typeof data === "object") {
+    return serializeObject(data as Record<string, unknown>, indent);
+  }
+
+  return `${String(data)}\n`;
+}
+
+/**
+ * Serialize an array to YAML.
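+ *
+ * Output shape for a small input (sketch):
+ * @example
+ * // serializeArray(["a", { id: 1 }], 0) yields:
+ * // - a
+ * // - id: 1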
+ */
+function serializeArray(arr: unknown[], indent: number): string {
+  if (arr.length === 0) {
+    return "[]\n";
+  }
+
+  const prefix = " ".repeat(indent);
+  let result = "\n";
+
+  for (const item of arr) {
+    if (item === null || item === undefined) {
+      result += `${prefix}- null\n`;
+    } else if (typeof item === "object" && !Array.isArray(item)) {
+      // Object items: first key on same line as dash
+      const obj = item as Record<string, unknown>;
+      const keys = Object.keys(obj);
+      if (keys.length === 0) {
+        result += `${prefix}- {}\n`;
+      } else {
+        const firstKey = keys[0] as string;
+        const firstValue = obj[firstKey];
+        const restKeys = keys.slice(1);
+
+        // First key on dash line
+        if (isScalar(firstValue)) {
+          result += `${prefix}- ${formatKey(firstKey)}: ${serializeYaml(firstValue, indent + 2).trimStart()}`;
+        } else {
+          result += `${prefix}- ${formatKey(firstKey)}:${serializeYaml(firstValue, indent + 4)}`;
+        }
+
+        // Remaining keys indented under the dash
+        for (const key of restKeys) {
+          const value = obj[key];
+          if (isScalar(value)) {
+            result += `${prefix}  ${formatKey(key)}: ${serializeYaml(value, indent + 2).trimStart()}`;
+          } else {
+            result += `${prefix}  ${formatKey(key)}:${serializeYaml(value, indent + 2)}`;
+          }
+        }
+      }
+    } else if (Array.isArray(item)) {
+      result += `${prefix}-${serializeYaml(item, indent + 2)}`;
+    } else {
+      result += `${prefix}- ${serializeYaml(item, indent + 2).trimStart()}`;
+    }
+  }
+
+  return result;
+}
+
+/**
+ * Serialize an object to YAML.
+ */
+function serializeObject(
+  obj: Record<string, unknown>,
+  indent: number,
+): string {
+  const keys = Object.keys(obj);
+  if (keys.length === 0) {
+    return "{}\n";
+  }
+
+  const prefix = " ".repeat(indent);
+  let result = "\n";
+
+  for (const key of keys) {
+    const value = obj[key];
+    if (isScalar(value)) {
+      result += `${prefix}${formatKey(key)}: ${serializeYaml(value, indent + 2).trimStart()}`;
+    } else {
+      result += `${prefix}${formatKey(key)}:${serializeYaml(value, indent + 2)}`;
+    }
+  }
+
+  return result;
+}
+
+/**
+ * Check if a value is a scalar (not object/array).
+ */
+function isScalar(value: unknown): boolean {
+  if (value === null || value === undefined) return true;
+  if (typeof value === "object") return false;
+  return true;
+}
diff --git a/src/generation/auto-regen.ts b/src/generation/auto-regen.ts
new file mode 100644
index 0000000..25c6560
--- /dev/null
+++ b/src/generation/auto-regen.ts
@@ -0,0 +1,133 @@
+/**
+ * Auto-regeneration of skill files triggered by schema drift.
+ * Called from the drift hook when tool schemas change.
+ * Preserves manual sections and respects access control.
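+ *
+ * Rough call shape (service and tool names are hypothetical):
+ * @example
+ * // await autoRegenerateSkills("n8n",
+ * //   [{ name: "n8n_list_workflows", description: "List workflows" }],
+ * //   { blockTools: ["n8n_delete_*"] });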
+ */
+import type { ToolSummary } from "../schema/types.ts";
+import type { AccessPolicy } from "../access/types.ts";
+import type { SkillTemplateInput } from "./types.ts";
+import { filterTools } from "../access/filter.ts";
+import { generateSkillMd, generateReferenceMd, estimateTokens } from "./templates.ts";
+import { detectPrefixGroups } from "./grouping.ts";
+import { resolveOutputDir, planFileWrites, executeFileWrites } from "./file-manager.ts";
+import { extractManualSections, injectManualSections } from "./preserve.ts";
+import { createLogger } from "../logger/index.ts";
+import { join } from "node:path";
+
+const log = createLogger("auto-regen");
+
+/** Result of an auto-regeneration attempt */
+export interface AutoRegenResult {
+  service: string;
+  regenerated: boolean;
+  filesWritten: string[];
+  manualSectionsPreserved: number;
+  toolCount: number;
+  error?: string;
+}
+
+/**
+ * Auto-regenerate skill files for a service after drift detection.
+ * Non-blocking -- errors are logged but never propagated.
+ *
+ * @param serviceName - The service whose skills need regeneration
+ * @param tools - Live tool list from the MCP server
+ * @param policy - Access control policy to filter tools
+ * @param outputDir - Optional override for output directory
+ */
+export async function autoRegenerateSkills(
+  serviceName: string,
+  tools: ToolSummary[],
+  policy: AccessPolicy = {},
+  outputDir?: string,
+): Promise<AutoRegenResult> {
+  const result: AutoRegenResult = {
+    service: serviceName,
+    regenerated: false,
+    filesWritten: [],
+    manualSectionsPreserved: 0,
+    toolCount: 0,
+  };
+
+  try {
+    // Apply access control
+    const filteredTools = filterTools(tools, policy);
+    result.toolCount = filteredTools.length;
+
+    if (filteredTools.length === 0) {
+      log.debug("auto_regen_skipped", {
+        service: serviceName,
+        reason: "no_tools_after_filtering",
+      });
+      return result;
+    }
+
+    // Build skill template input
+    const input: SkillTemplateInput = {
+      serviceName,
+      description: `MCP tools for ${serviceName}`,
+      tools: filteredTools,
+      triggerKeywords: [serviceName],
+    };
+
+    // Resolve output directory
+    const resolvedDir = outputDir ?? resolveOutputDir(serviceName);
+
+    // Generate new SKILL.md
+    let skillMd = generateSkillMd(input);
+
+    // Preserve manual sections from existing file
+    const existingSkillPath = join(resolvedDir, "SKILL.md");
+    const existingFile = Bun.file(existingSkillPath);
+    if (await existingFile.exists()) {
+      const existingContent = await existingFile.text();
+      const manualSections = extractManualSections(existingContent);
+      if (manualSections.length > 0) {
+        skillMd = injectManualSections(skillMd, manualSections);
+        result.manualSectionsPreserved = manualSections.length;
+      }
+    }
+
+    // Build schemas for reference file generation
+    // Use minimal SchemaOutput objects from ToolSummary data
+    const schemas = filteredTools.map((t) => ({
+      tool: t.name,
+      description: t.description,
+      inputSchema: {} as object,
+      usage: `mcp2cli ${serviceName} ${t.name}`,
+    }));
+
+    const groups = detectPrefixGroups(schemas, serviceName);
+
+    // Collect generated files
+    const generated = new Map<string, string>();
+    generated.set("SKILL.md", skillMd);
+
+    for (const group of groups) {
+      const refMd = generateReferenceMd(group, serviceName);
+      generated.set(`references/${group.filename}`, refMd);
+    }
+
+    // Write files using merge mode to preserve user content outside markers
+    const plans = await planFileWrites(resolvedDir, generated, "merge");
+    const written = await executeFileWrites(plans);
+
+    result.regenerated = true;
+    result.filesWritten = written;
+
+    const tokenCount = estimateTokens(skillMd);
+    log.info("auto_regen_complete", {
+      service: serviceName,
+      filesWritten: written.length,
+      manualSectionsPreserved: result.manualSectionsPreserved,
+      toolCount: filteredTools.length,
+      tokenCount,
+    });
+  } catch (err) {
+    const message = err instanceof Error ? err.message : String(err);
+    result.error = message;
+    log.warn("auto_regen_failed", { service: serviceName, error: message });
+  }
+
+  return result;
+}
diff --git a/src/generation/diff.ts b/src/generation/diff.ts
new file mode 100644
index 0000000..e4ea528
--- /dev/null
+++ b/src/generation/diff.ts
@@ -0,0 +1,187 @@
+/**
+ * Skill file diff preview.
+ * Compares existing skill file tool lists against newly generated tool lists
+ * to produce a human-readable diff showing added, removed, and modified tools.
+ */
+import type { ToolSummary } from "../schema/types.ts";
+
+/** Classification of a single tool change */
+export interface ToolChange {
+  tool: string;
+  type: "added" | "removed" | "modified";
+  /** For modified tools -- what changed */
+  details?: string;
+}
+
+/** Complete diff result for a skill file */
+export interface SkillDiffResult {
+  service: string;
+  hasChanges: boolean;
+  added: ToolChange[];
+  removed: ToolChange[];
+  modified: ToolChange[];
+  /** Total tool count in the new version */
+  newToolCount: number;
+  /** Total tool count in the existing version */
+  existingToolCount: number;
+}
+
+/**
+ * Parse tool names and descriptions from an existing SKILL.md file.
+ * Extracts from the quick reference table (| Tool | Description |).
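+ *
+ * @example
+ * // Given a SKILL.md containing:
+ * //   | Tool | Description |
+ * //   |------|-------------|
+ * //   | list_workflows | List workflows |
+ * // returns [{ name: "list_workflows", description: "List workflows" }]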
+ */ +export function parseExistingTools(skillContent: string): ToolSummary[] { + const tools: ToolSummary[] = []; + const lines = skillContent.split("\n"); + + let inTable = false; + let headerSeen = false; + + for (const line of lines) { + const trimmed = line.trim(); + + // Detect table start by header row (exact match on "| Tool |" pattern) + if ( + (trimmed.startsWith("| Tool |") || trimmed.startsWith("| tool |")) && + trimmed.includes("Description") + ) { + inTable = true; + headerSeen = false; + continue; + } + + // Skip separator row (|------|...) + if (inTable && !headerSeen && trimmed.startsWith("|---")) { + headerSeen = true; + continue; + } + + // Parse data rows + if (inTable && headerSeen && trimmed.startsWith("|")) { + const cells = trimmed + .split("|") + .map((c) => c.trim()) + .filter((c) => c.length > 0); + + if (cells.length >= 2) { + tools.push({ + name: cells[0]!, + description: cells[1]!, + }); + } + continue; + } + + // End of table + if (inTable && headerSeen && !trimmed.startsWith("|")) { + inTable = false; + } + } + + return tools; +} + +/** + * Compute the diff between existing and new tool lists. + * Identifies added, removed, and modified (description changed) tools. + */ +export function computeSkillDiff( + service: string, + existingTools: ToolSummary[], + newTools: ToolSummary[], +): SkillDiffResult { + const existingMap = new Map(existingTools.map((t) => [t.name, t])); + const newMap = new Map(newTools.map((t) => [t.name, t])); + + const added: ToolChange[] = []; + const removed: ToolChange[] = []; + const modified: ToolChange[] = []; + + // Find removed tools + for (const [name] of existingMap) { + if (!newMap.has(name)) { + removed.push({ tool: name, type: "removed" }); + } + } + + // Find added and modified tools + for (const [name, newTool] of newMap) { + const existing = existingMap.get(name); + if (!existing) { + added.push({ tool: name, type: "added" }); + } else if (existing.description !== newTool.description) { + modified.push({ + tool: name, + type: "modified", + details: `description: "${truncate(existing.description, 40)}" -> "${truncate(newTool.description, 40)}"`, + }); + } + } + + // Sort each list for deterministic output + added.sort((a, b) => a.tool.localeCompare(b.tool)); + removed.sort((a, b) => a.tool.localeCompare(b.tool)); + modified.sort((a, b) => a.tool.localeCompare(b.tool)); + + return { + service, + hasChanges: added.length > 0 || removed.length > 0 || modified.length > 0, + added, + removed, + modified, + newToolCount: newTools.length, + existingToolCount: existingTools.length, + }; +} + +/** Truncate a string with ellipsis */ +function truncate(s: string, maxLen: number): string { + if (s.length <= maxLen) return s; + return s.slice(0, maxLen - 3) + "..."; +} + +/** + * Format a diff result as human-readable text for terminal output. + * Uses +/- /~ prefixes like a simplified diff. 
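+ *
+ * Sample rendering (counts and names are illustrative):
+ * @example
+ * // Skill diff for "n8n":
+ * //   Existing: 3 tools -> New: 4 tools
+ * //
+ * //   Added (1):
+ * //     + n8n_create_workflow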
+ */
+export function formatDiffPreview(diff: SkillDiffResult): string {
+  const lines: string[] = [];
+
+  lines.push(`Skill diff for "${diff.service}":`);
+  lines.push(
+    `  Existing: ${diff.existingToolCount} tools -> New: ${diff.newToolCount} tools`,
+  );
+  lines.push("");
+
+  if (!diff.hasChanges) {
+    lines.push("  No changes detected.");
+    return lines.join("\n");
+  }
+
+  if (diff.added.length > 0) {
+    lines.push(`  Added (${diff.added.length}):`);
+    for (const change of diff.added) {
+      lines.push(`    + ${change.tool}`);
+    }
+    lines.push("");
+  }
+
+  if (diff.removed.length > 0) {
+    lines.push(`  Removed (${diff.removed.length}):`);
+    for (const change of diff.removed) {
+      lines.push(`    - ${change.tool}`);
+    }
+    lines.push("");
+  }
+
+  if (diff.modified.length > 0) {
+    lines.push(`  Modified (${diff.modified.length}):`);
+    for (const change of diff.modified) {
+      const detail = change.details ? ` (${change.details})` : "";
+      lines.push(`    ~ ${change.tool}${detail}`);
+    }
+    lines.push("");
+  }
+
+  return lines.join("\n");
+}
diff --git a/src/generation/index.ts b/src/generation/index.ts
index 89f97e9..2600e79 100644
--- a/src/generation/index.ts
+++ b/src/generation/index.ts
@@ -28,3 +28,23 @@ export {
   planFileWrites,
   executeFileWrites,
 } from "./file-manager.ts";
+
+// Preservation
+export {
+  extractManualSections,
+  injectManualSections,
+  createManualPlaceholder,
+} from "./preserve.ts";
+export type { ManualSection } from "./preserve.ts";
+
+// Diff
+export {
+  parseExistingTools,
+  computeSkillDiff,
+  formatDiffPreview,
+} from "./diff.ts";
+export type { ToolChange, SkillDiffResult } from "./diff.ts";
+
+// Auto-regeneration
+export { autoRegenerateSkills } from "./auto-regen.ts";
+export type { AutoRegenResult } from "./auto-regen.ts";
diff --git a/src/generation/preserve.ts b/src/generation/preserve.ts
new file mode 100644
index 0000000..abdbf81
--- /dev/null
+++ b/src/generation/preserve.ts
@@ -0,0 +1,191 @@
+/**
+ * Manual section preservation for skill file regeneration.
+ * Extracts content between MANUAL markers and re-inserts them after regeneration.
+ * Ensures user customizations survive auto-regeneration triggered by schema drift.
+ */
+
+/** Markers that delimit user-editable sections in skill files */
+const MANUAL_START_RE = /<!-- MANUAL:START -->/;
+const MANUAL_END_RE = /<!-- MANUAL:END -->/;
+const MANUAL_START = "<!-- MANUAL:START -->";
+const MANUAL_END = "<!-- MANUAL:END -->";
+
+/** A preserved manual section with its position context */
+export interface ManualSection {
+  /** Content between MANUAL markers (including the markers themselves) */
+  content: string;
+  /** The heading or label immediately before this manual section, if any */
+  precedingHeading: string;
+  /** Index of this section (0-based, in order of appearance) */
+  index: number;
+}
+
+/**
+ * Extract all manual sections from an existing skill file.
+ * Returns the sections in order of appearance.
+ * Each section includes the markers and all content between them.
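+ *
+ * @example
+ * // For a file containing:
+ * //   ## Notes
+ * //   <!-- MANUAL:START -->
+ * //   my notes
+ * //   <!-- MANUAL:END -->
+ * // returns one section with precedingHeading "## Notes" and the full
+ * // marker-to-marker block as its content.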
+ */
+export function extractManualSections(fileContent: string): ManualSection[] {
+  const sections: ManualSection[] = [];
+  const lines = fileContent.split("\n");
+
+  let inManual = false;
+  let sectionLines: string[] = [];
+  let precedingHeading = "";
+  let sectionIndex = 0;
+
+  for (let i = 0; i < lines.length; i++) {
+    const line = lines[i]!;
+
+    if (!inManual && MANUAL_START_RE.test(line)) {
+      inManual = true;
+      sectionLines = [line];
+
+      // Look back for the nearest heading
+      precedingHeading = findPrecedingHeading(lines, i);
+      continue;
+    }
+
+    if (inManual) {
+      sectionLines.push(line);
+
+      if (MANUAL_END_RE.test(line)) {
+        sections.push({
+          content: sectionLines.join("\n"),
+          precedingHeading,
+          index: sectionIndex,
+        });
+        inManual = false;
+        sectionLines = [];
+        sectionIndex++;
+      }
+    }
+  }
+
+  return sections;
+}
+
+/**
+ * Find the nearest markdown heading above a given line index.
+ * Returns empty string if no heading is found within 5 lines.
+ */
+function findPrecedingHeading(lines: string[], fromIndex: number): string {
+  const searchStart = Math.max(0, fromIndex - 5);
+  for (let i = fromIndex - 1; i >= searchStart; i--) {
+    const line = lines[i]!.trim();
+    if (line.startsWith("#")) {
+      return line;
+    }
+  }
+  return "";
+}
+
+/**
+ * Inject preserved manual sections into newly generated content.
+ * Matching strategy:
+ * 1. Match by preceding heading (most reliable across regenerations)
+ * 2. Match by section index (fallback when headings change)
+ * 3. Append at end if no match found (never lose manual content)
+ *
+ * If the generated content already has MANUAL markers, they serve as
+ * insertion points. Otherwise, sections are appended before the
+ * AUTO-GENERATED:END marker or at file end.
+ */
+export function injectManualSections(
+  generatedContent: string,
+  preservedSections: ManualSection[],
+): string {
+  if (preservedSections.length === 0) {
+    return generatedContent;
+  }
+
+  // Check if generated content already has manual placeholders
+  const hasPlaceholders = generatedContent.includes(MANUAL_START);
+
+  if (hasPlaceholders) {
+    return replaceManualPlaceholders(generatedContent, preservedSections);
+  }
+
+  // No placeholders -- append manual sections before AUTO-GENERATED:END or at end
+  return appendManualSections(generatedContent, preservedSections);
+}
+
+/**
+ * Replace empty MANUAL marker pairs in generated content with preserved content.
+ * Matches by index order.
+ */
+function replaceManualPlaceholders(
+  content: string,
+  sections: ManualSection[],
+): string {
+  let result = content;
+  let sectionIdx = 0;
+
+  // Find each placeholder pair and replace with preserved content
+  const startRe = /<!-- MANUAL:START -->[\s\S]*?<!-- MANUAL:END -->/g;
+  result = result.replace(startRe, (match) => {
+    if (sectionIdx < sections.length) {
+      const preserved = sections[sectionIdx]!.content;
+      sectionIdx++;
+      return preserved;
+    }
+    return match;
+  });
+
+  // If there are more preserved sections than placeholders, append the rest
+  if (sectionIdx < sections.length) {
+    const remaining = sections
+      .slice(sectionIdx)
+      .map((s) => s.content)
+      .join("\n\n");
+    result = appendBeforeEndMarker(result, remaining);
+  }
+
+  return result;
+}
+
+/**
+ * Append manual sections before the AUTO-GENERATED:END marker,
+ * or at the end of the file if no end marker exists.
+ */
+function appendManualSections(
+  content: string,
+  sections: ManualSection[],
+): string {
+  const manualBlock = sections.map((s) => s.content).join("\n\n");
+  return appendBeforeEndMarker(content, manualBlock);
+}
+
+/**
+ * Insert text before the AUTO-GENERATED:END marker.
+ * Falls back to appending at file end.
+ */
+function appendBeforeEndMarker(content: string, toInsert: string): string {
+  const endMarkerRe = /<!-- AUTO-GENERATED:END -->/;
+  const match = content.match(endMarkerRe);
+
+  if (match && match.index !== undefined) {
+    const before = content.slice(0, match.index);
+    const after = content.slice(match.index);
+    return before + toInsert + "\n\n" + after;
+  }
+
+  // No end marker -- append at end
+  return content + "\n\n" + toInsert + "\n";
+}
+
+/**
+ * Create an empty manual section block with an optional label.
+ * Used in templates to provide user-editable areas.
+ */
+export function createManualPlaceholder(label?: string): string {
+  const lines: string[] = [];
+  if (label) {
+    lines.push(`## ${label}`);
+    lines.push("");
+  }
+  lines.push(MANUAL_START);
+  lines.push("");
+  lines.push(MANUAL_END);
+  return lines.join("\n");
+}
diff --git a/src/generation/templates.ts b/src/generation/templates.ts
index 0b4a059..cd33704 100644
--- a/src/generation/templates.ts
+++ b/src/generation/templates.ts
@@ -15,6 +15,10 @@ export function estimateTokens(text: string): number {
 const MARKER_START = "<!-- AUTO-GENERATED:START -->";
 const MARKER_END = "<!-- AUTO-GENERATED:END -->";
 
+/** Manual (user-editable) section markers */
+const MANUAL_START = "<!-- MANUAL:START -->";
+const MANUAL_END = "<!-- MANUAL:END -->";
+
 /**
  * Generate a slim SKILL.md file with YAML frontmatter, tool table, and invoke pattern.
  * Stays under 300 tokens for typical services.
@@ -68,6 +72,14 @@
   lines.push(MARKER_END);
   lines.push("");
 
+  // Manual section for user customizations (preserved across regeneration)
+  lines.push("## Notes");
+  lines.push("");
+  lines.push(MANUAL_START);
+  lines.push("");
+  lines.push(MANUAL_END);
+  lines.push("");
+
   return lines.join("\n");
 }
diff --git a/src/invocation/index.ts b/src/invocation/index.ts
index ceb3bb8..523fe9f 100644
--- a/src/invocation/index.ts
+++ b/src/invocation/index.ts
@@ -16,3 +16,4 @@ export type {
   ToolCallSuccess,
 } from "./types.ts";
 export type { DryRunPreview } from "./dry-run.ts";
+export type { OutputFormat } from "../format/types.ts";
diff --git a/src/invocation/parse.ts b/src/invocation/parse.ts
index 7cd72c5..175ff7c 100644
--- a/src/invocation/parse.ts
+++ b/src/invocation/parse.ts
@@ -1,8 +1,10 @@
 import type { ParseResult } from "./types.ts";
+import type { OutputFormat } from "../format/types.ts";
+import { isValidFormat } from "../format/types.ts";
 
 /**
  * Parse CLI argv into a structured tool call.
- * Expects: [serviceName, toolName, ...rest] where rest may contain --params, --dry-run, --fields.
+ * Expects: [serviceName, toolName, ...rest] where rest may contain --params, --dry-run, --fields, --format.
  *
  * Supports flag syntaxes:
  *   --params '{"key":"value"}'   (space-separated)
  *   --params='{"key":"value"}'   (equals-joined)
@@ -10,6 +12,8 @@
  *   --dry-run                    (boolean flag)
  *   --fields 'id,name'           (space-separated)
  *   --fields='id,name'           (equals-joined)
+ *   --format table               (space-separated)
+ *   --format=yaml                (equals-joined)
  *
 * Returns a discriminated union -- callers check .ok before accessing .value or .error.
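 *
 * @example
 * // parseToolCallArgs(["n8n", "n8n_list_workflows", "--format=yaml"]) ->
 * // { ok: true, value: { serviceName: "n8n", toolName: "n8n_list_workflows",
 * //   params: {}, dryRun: false, fields: [], format: "yaml" } }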
 */
@@ -33,6 +37,7 @@ export function parseToolCallArgs(args: string[]): ParseResult {
   let paramsJson: string | undefined;
   let dryRun = false;
   let fields: string[] = [];
+  let format: OutputFormat = "json";
 
   // Unified multi-flag scan: process ALL flags in a single pass
   const rest = args.slice(2);
@@ -72,13 +77,48 @@
       fields = arg.slice("--fields=".length).split(",").filter(Boolean);
       continue;
     }
+
+    if (arg === "--format") {
+      const nextArg = rest[i + 1] as string | undefined;
+      if (nextArg !== undefined) {
+        if (!isValidFormat(nextArg)) {
+          return {
+            ok: false,
+            error: {
+              error: true,
+              code: "INPUT_VALIDATION_ERROR",
+              message: `Invalid format "${nextArg}". Valid formats: json, table, yaml, csv, ndjson`,
+            },
+          };
+        }
+        format = nextArg;
+        i++; // advance past value
+      }
+      continue;
+    }
+
+    if (arg.startsWith("--format=")) {
+      const value = arg.slice("--format=".length);
+      if (!isValidFormat(value)) {
+        return {
+          ok: false,
+          error: {
+            error: true,
+            code: "INPUT_VALIDATION_ERROR",
+            message: `Invalid format "${value}". Valid formats: json, table, yaml, csv, ndjson`,
+          },
+        };
+      }
+      format = value;
+      continue;
+    }
   }
 
   // Default to empty object if no --params provided
   if (paramsJson === undefined) {
     return {
       ok: true,
-      value: { serviceName, toolName, params: {}, dryRun, fields },
+      value: { serviceName, toolName, params: {}, dryRun, fields, format },
     };
   }
 
@@ -101,7 +141,7 @@
     const params = parsed as Record<string, unknown>;
     return {
       ok: true,
-      value: { serviceName, toolName, params, dryRun, fields },
+      value: { serviceName, toolName, params, dryRun, fields, format },
     };
   } catch (err) {
     const message =
diff --git a/src/invocation/types.ts b/src/invocation/types.ts
index e499f8e..f8bc4e2 100644
--- a/src/invocation/types.ts
+++ b/src/invocation/types.ts
@@ -1,4 +1,5 @@
 import type { ErrorCode } from "../types/index.ts";
+import type { OutputFormat } from "../format/types.ts";
 
 /**
  * Parsed CLI tool call arguments.
@@ -10,6 +11,7 @@
   params: Record<string, unknown>;
   dryRun: boolean;
   fields: string[];
+  format: OutputFormat;
 }
 
 /**
diff --git a/src/resilience/circuit-breaker.ts b/src/resilience/circuit-breaker.ts
new file mode 100644
index 0000000..6382838
--- /dev/null
+++ b/src/resilience/circuit-breaker.ts
@@ -0,0 +1,260 @@
+/**
+ * Circuit breaker for HTTP gateway connections.
+ * Tracks consecutive failures per service and opens the circuit after threshold.
+ * State is disk-persisted so rapid successive CLI invocations share state.
+ *
+ * State machine:
+ *   CLOSED    -- normal operation, HTTP attempted first
+ *   OPEN      -- HTTP skipped, fallback used directly
+ *   HALF-OPEN -- after cooldown, one probe attempt allowed
+ *
+ * Transitions:
+ *   CLOSED -> OPEN:      failureCount >= threshold
+ *   OPEN -> HALF-OPEN:   cooldown period elapsed
+ *   HALF-OPEN -> CLOSED: probe succeeds (recordSuccess)
+ *   HALF-OPEN -> OPEN:   probe fails (recordFailure)
+ */
+import { mkdir, unlink, rename } from "node:fs/promises";
+import { dirname, join } from "node:path";
+import { createLogger } from "../logger/index.ts";
+import type {
+  CircuitBreakerConfig,
+  CircuitBreakerState,
+  CircuitState,
+} from "./types.ts";
+import { DEFAULT_CIRCUIT_BREAKER_CONFIG } from "./types.ts";
+
+const log = createLogger("circuit-breaker");
+
+const VALID_STATES = new Set<CircuitState>(["closed", "open", "half-open"]);
+
+/** Validate service name to prevent path traversal attacks.
+ */
+function validateServiceName(service: string): string {
+  if (service.includes("/") || service.includes("\\") || service.includes("..")) {
+    throw new Error(`Invalid service name: "${service}"`);
+  }
+  return service;
+}
+
+/** Initial state for a service with no prior history. */
+function initialState(): CircuitBreakerState {
+  return {
+    state: "closed",
+    failureCount: 0,
+    lastFailureAt: null,
+    openedAt: null,
+    lastSuccessAt: null,
+  };
+}
+
+/**
+ * Resolve the circuit breaker storage directory.
+ * Uses MCP2CLI_CACHE_DIR if set, otherwise ~/.cache/mcp2cli/circuit-breaker.
+ */
+export function getCircuitBreakerDir(): string {
+  const cacheBase = process.env.MCP2CLI_CACHE_DIR;
+  if (cacheBase) {
+    // MCP2CLI_CACHE_DIR points at schemas dir; go up one level for sibling
+    return join(dirname(cacheBase), "circuit-breaker");
+  }
+  const home = process.env.HOME;
+  if (!home) {
+    throw new Error(
+      "Cannot determine circuit breaker path: HOME environment variable is not set",
+    );
+  }
+  return join(home, ".cache", "mcp2cli", "circuit-breaker");
+}
+
+/** Get the state file path for a given service. */
+export function getStateFilePath(service: string): string {
+  validateServiceName(service);
+  return join(getCircuitBreakerDir(), `${service}.json`);
+}
+
+/**
+ * Load persisted circuit breaker state for a service.
+ * Returns initial (closed) state if no file exists or file is corrupted.
+ */
+export async function loadState(
+  service: string,
+): Promise<CircuitBreakerState> {
+  const filePath = getStateFilePath(service);
+  const file = Bun.file(filePath);
+
+  if (!(await file.exists())) {
+    return initialState();
+  }
+
+  try {
+    const data = (await file.json()) as CircuitBreakerState;
+    // Basic structure validation
+    if (
+      !data.state ||
+      !VALID_STATES.has(data.state) ||
+      typeof data.failureCount !== "number" ||
+      !Number.isFinite(data.failureCount)
+    ) {
+      log.warn("corrupt_state", { service, path: filePath });
+      return initialState();
+    }
+    return data;
+  } catch {
+    log.warn("corrupt_state", { service, path: filePath });
+    return initialState();
+  }
+}
+
+/**
+ * Persist circuit breaker state to disk.
+ * Uses atomic write (temp + rename) to prevent corruption.
+ */
+export async function saveState(
+  service: string,
+  state: CircuitBreakerState,
+): Promise<void> {
+  const filePath = getStateFilePath(service);
+  const dir = dirname(filePath);
+
+  await mkdir(dir, { recursive: true });
+
+  const tempPath = `${filePath}.tmp.${process.pid}`;
+  try {
+    await Bun.write(tempPath, JSON.stringify(state, null, 2));
+    await rename(tempPath, filePath);
+    log.debug("state_saved", { service, state: state.state });
+  } catch (err) {
+    await unlink(tempPath).catch(() => {});
+    throw err;
+  }
+}
+
+/**
+ * Clear persisted circuit breaker state for a service.
+ * Useful for manual reset or testing.
+ */
+export async function clearState(service: string): Promise<void> {
+  const filePath = getStateFilePath(service);
+  await unlink(filePath).catch(() => {});
+}
+
+/**
+ * Determine the effective circuit state, accounting for cooldown transitions.
+ * If the circuit is open and cooldown has elapsed, transitions to half-open.
+ */ +export function resolveState( + state: CircuitBreakerState, + config: CircuitBreakerConfig = DEFAULT_CIRCUIT_BREAKER_CONFIG, +): CircuitState { + if (state.state !== "open") { + return state.state; + } + + // Check if cooldown has elapsed -> transition to half-open + if (state.openedAt) { + const openedTime = new Date(state.openedAt).getTime(); + const elapsed = Date.now() - openedTime; + if (elapsed >= config.cooldownMs) { + return "half-open"; + } + } + + return "open"; +} + +/** + * Record a connection failure and return the updated state. + * Transitions closed -> open when threshold is reached. + * Transitions half-open -> open on probe failure. + */ +// Note: read-modify-write without locking. Concurrent CLI calls may cause +// delayed threshold detection (2N vs N failures). Accepted tradeoff -- +// worst case is delayed circuit-open, not data corruption. +export async function recordFailure( + service: string, + config: CircuitBreakerConfig = DEFAULT_CIRCUIT_BREAKER_CONFIG, +): Promise { + const state = await loadState(service); + const now = new Date().toISOString(); + + state.failureCount += 1; + state.lastFailureAt = now; + + const effectiveState = resolveState(state, config); + + if (effectiveState === "half-open") { + // Probe failed -- reopen circuit + state.state = "open"; + state.openedAt = now; + log.warn("circuit_reopened", { + service, + failureCount: state.failureCount, + }); + } else if ( + effectiveState === "closed" && + state.failureCount >= config.failureThreshold + ) { + // Threshold reached -- open circuit + state.state = "open"; + state.openedAt = now; + log.warn("circuit_opened", { + service, + failureCount: state.failureCount, + threshold: config.failureThreshold, + }); + } + + await saveState(service, state); + return state; +} + +/** + * Record a successful connection and return the updated state. + * Resets failure count and closes the circuit. + */ +// Note: read-modify-write without locking. Concurrent CLI calls may cause +// delayed threshold detection (2N vs N failures). Accepted tradeoff -- +// worst case is delayed circuit-open, not data corruption. +export async function recordSuccess( + service: string, +): Promise { + const state = await loadState(service); + const now = new Date().toISOString(); + + const previousState = state.state; + state.state = "closed"; + state.failureCount = 0; + state.lastSuccessAt = now; + state.openedAt = null; + + if (previousState !== "closed") { + log.info("circuit_closed", { service, previousState }); + } + + await saveState(service, state); + return state; +} + +/** + * Check whether HTTP should be attempted for this service. + * Returns true if circuit is closed or half-open (probe allowed). + * Returns false if circuit is open (skip HTTP, use fallback directly). 
+ */ +export async function shouldAttemptHttp( + service: string, + config: CircuitBreakerConfig = DEFAULT_CIRCUIT_BREAKER_CONFIG, +): Promise { + const state = await loadState(service); + const effective = resolveState(state, config); + + if (effective === "open") { + log.info("circuit_open_skipping_http", { service }); + return false; + } + + if (effective === "half-open") { + log.info("circuit_half_open_probing", { service }); + } + + return true; +} diff --git a/src/resilience/index.ts b/src/resilience/index.ts new file mode 100644 index 0000000..44ac3de --- /dev/null +++ b/src/resilience/index.ts @@ -0,0 +1,19 @@ +export type { + CircuitState, + CircuitBreakerState, + CircuitBreakerConfig, +} from "./types.ts"; + +export { DEFAULT_CIRCUIT_BREAKER_CONFIG } from "./types.ts"; + +export { + loadState, + saveState, + clearState, + resolveState, + recordFailure, + recordSuccess, + shouldAttemptHttp, + getCircuitBreakerDir, + getStateFilePath, +} from "./circuit-breaker.ts"; diff --git a/src/resilience/types.ts b/src/resilience/types.ts new file mode 100644 index 0000000..3759f3a --- /dev/null +++ b/src/resilience/types.ts @@ -0,0 +1,38 @@ +/** + * Types for the circuit breaker resilience module. + * Circuit breaker protects against repeated calls to an unreachable HTTP gateway. + */ + +/** Circuit breaker states following the standard pattern. */ +export type CircuitState = "closed" | "open" | "half-open"; + +/** + * Persisted circuit breaker state for a single service. + * Stored at ~/.cache/mcp2cli/circuit-breaker/{service}.json + */ +export interface CircuitBreakerState { + /** Current circuit state */ + state: CircuitState; + /** Number of consecutive failures while circuit is closed */ + failureCount: number; + /** ISO timestamp of the last failure */ + lastFailureAt: string | null; + /** ISO timestamp when circuit was opened (null if not open) */ + openedAt: string | null; + /** ISO timestamp of last successful connection */ + lastSuccessAt: string | null; +} + +/** Configuration for circuit breaker behavior. */ +export interface CircuitBreakerConfig { + /** Number of consecutive failures before opening the circuit (default: 5) */ + failureThreshold: number; + /** Cooldown period in ms before a half-open probe is attempted (default: 60000) */ + cooldownMs: number; +} + +/** Default circuit breaker configuration. 
*/ +export const DEFAULT_CIRCUIT_BREAKER_CONFIG: CircuitBreakerConfig = { + failureThreshold: 5, + cooldownMs: 60_000, +}; diff --git a/src/types/index.ts b/src/types/index.ts index 6c27ab1..9f828b6 100644 --- a/src/types/index.ts +++ b/src/types/index.ts @@ -11,6 +11,7 @@ export type ErrorCode = | "TOOL_ERROR" | "TOOL_TIMEOUT" | "UNKNOWN_COMMAND" + | "TOOL_BLOCKED" | "INTERNAL_ERROR"; /** diff --git a/tests/access/filter.test.ts b/tests/access/filter.test.ts new file mode 100644 index 0000000..f6361ac --- /dev/null +++ b/tests/access/filter.test.ts @@ -0,0 +1,216 @@ +import { describe, expect, test } from "bun:test"; +import { + globToRegex, + checkToolAccess, + filterTools, + extractPolicy, +} from "../../src/access/index.ts"; +import type { AccessPolicy } from "../../src/access/index.ts"; + +// -- globToRegex -- + +describe("globToRegex", () => { + test("exact match", () => { + const re = globToRegex("list_workflows"); + expect(re.test("list_workflows")).toBe(true); + expect(re.test("list_workflow")).toBe(false); + expect(re.test("list_workflowss")).toBe(false); + }); + + test("wildcard * matches any characters", () => { + const re = globToRegex("list_*"); + expect(re.test("list_workflows")).toBe(true); + expect(re.test("list_nodes")).toBe(true); + expect(re.test("list_")).toBe(true); + expect(re.test("get_workflows")).toBe(false); + }); + + test("wildcard * at start", () => { + const re = globToRegex("*_workflows"); + expect(re.test("list_workflows")).toBe(true); + expect(re.test("get_workflows")).toBe(true); + expect(re.test("workflows")).toBe(false); + }); + + test("wildcard ? matches single character", () => { + const re = globToRegex("tool_?"); + expect(re.test("tool_a")).toBe(true); + expect(re.test("tool_b")).toBe(true); + expect(re.test("tool_ab")).toBe(false); + expect(re.test("tool_")).toBe(false); + }); + + test("combined * and ?", () => { + const re = globToRegex("n8n_*_workflow?"); + expect(re.test("n8n_list_workflows")).toBe(true); + expect(re.test("n8n_get_workflowX")).toBe(true); + expect(re.test("n8n_list_workflowXY")).toBe(false); + }); + + test("empty pattern matches empty string only", () => { + const re = globToRegex(""); + expect(re.test("")).toBe(true); + expect(re.test("anything")).toBe(false); + }); + + test("escapes regex special characters", () => { + const re = globToRegex("tool.name"); + expect(re.test("tool.name")).toBe(true); + expect(re.test("toolXname")).toBe(false); // . 
should not be regex wildcard + }); +}); + +// -- checkToolAccess -- + +describe("checkToolAccess", () => { + test("allows tool when no policy is set", () => { + const result = checkToolAccess("any_tool", {}); + expect(result.allowed).toBe(true); + expect(result.reason).toBeUndefined(); + }); + + test("allows tool matching allowTools pattern", () => { + const policy: AccessPolicy = { allowTools: ["list_*"] }; + const result = checkToolAccess("list_workflows", policy); + expect(result.allowed).toBe(true); + }); + + test("blocks tool not matching allowTools pattern", () => { + const policy: AccessPolicy = { allowTools: ["list_*"] }; + const result = checkToolAccess("delete_workflows", policy); + expect(result.allowed).toBe(false); + expect(result.reason).toContain("not in the allowTools list"); + }); + + test("blocks tool matching blockTools pattern", () => { + const policy: AccessPolicy = { blockTools: ["delete_*"] }; + const result = checkToolAccess("delete_workflows", policy); + expect(result.allowed).toBe(false); + expect(result.reason).toContain("blocked by policy"); + }); + + test("allows tool not matching blockTools pattern", () => { + const policy: AccessPolicy = { blockTools: ["delete_*"] }; + const result = checkToolAccess("list_workflows", policy); + expect(result.allowed).toBe(true); + }); + + test("allowTools + blockTools: allow first, then block", () => { + const policy: AccessPolicy = { + allowTools: ["n8n_*"], + blockTools: ["n8n_delete_*"], + }; + // Passes allow, not blocked + expect(checkToolAccess("n8n_list_workflows", policy).allowed).toBe(true); + // Passes allow but blocked + expect(checkToolAccess("n8n_delete_workflow", policy).allowed).toBe(false); + // Fails allow + expect(checkToolAccess("other_tool", policy).allowed).toBe(false); + }); + + test("empty allowTools array passes all through", () => { + const policy: AccessPolicy = { allowTools: [] }; + const result = checkToolAccess("any_tool", policy); + expect(result.allowed).toBe(true); + }); + + test("empty blockTools array blocks nothing", () => { + const policy: AccessPolicy = { blockTools: [] }; + const result = checkToolAccess("any_tool", policy); + expect(result.allowed).toBe(true); + }); +}); + +// -- filterTools -- + +describe("filterTools", () => { + const tools = [ + { name: "list_workflows", description: "List workflows" }, + { name: "get_workflow", description: "Get workflow" }, + { name: "delete_workflow", description: "Delete workflow" }, + { name: "create_workflow", description: "Create workflow" }, + { name: "list_nodes", description: "List nodes" }, + ]; + + test("allowTools only (whitelist)", () => { + const policy: AccessPolicy = { allowTools: ["list_*"] }; + const filtered = filterTools(tools, policy); + expect(filtered).toHaveLength(2); + expect(filtered.map((t) => t.name)).toEqual(["list_workflows", "list_nodes"]); + }); + + test("blockTools only (blacklist)", () => { + const policy: AccessPolicy = { blockTools: ["delete_*"] }; + const filtered = filterTools(tools, policy); + expect(filtered).toHaveLength(4); + expect(filtered.map((t) => t.name)).not.toContain("delete_workflow"); + }); + + test("both allowTools and blockTools", () => { + const policy: AccessPolicy = { + allowTools: ["*_workflow", "*_workflows"], + blockTools: ["delete_*"], + }; + const filtered = filterTools(tools, policy); + expect(filtered).toHaveLength(3); + const names = filtered.map((t) => t.name); + expect(names).toContain("list_workflows"); + expect(names).toContain("get_workflow"); + 
expect(names).toContain("create_workflow"); + expect(names).not.toContain("delete_workflow"); + expect(names).not.toContain("list_nodes"); + }); + + test("no policy (passes all through)", () => { + const filtered = filterTools(tools, {}); + expect(filtered).toHaveLength(5); + }); + + test("empty arrays (passes all through)", () => { + const filtered = filterTools(tools, { allowTools: [], blockTools: [] }); + expect(filtered).toHaveLength(5); + }); + + test("preserves extra properties on tool objects", () => { + const richTools = [ + { name: "tool_a", description: "A", extra: 42 }, + { name: "tool_b", description: "B", extra: 99 }, + ]; + const policy: AccessPolicy = { allowTools: ["tool_a"] }; + const filtered = filterTools(richTools, policy); + expect(filtered).toHaveLength(1); + expect(filtered[0]!.extra).toBe(42); + }); + + test("empty tool list returns empty", () => { + const policy: AccessPolicy = { allowTools: ["*"] }; + const filtered = filterTools([], policy); + expect(filtered).toHaveLength(0); + }); +}); + +// -- extractPolicy -- + +describe("extractPolicy", () => { + test("extracts allowTools and blockTools from config", () => { + const config = { + allowTools: ["list_*"], + blockTools: ["delete_*"], + }; + const policy = extractPolicy(config); + expect(policy.allowTools).toEqual(["list_*"]); + expect(policy.blockTools).toEqual(["delete_*"]); + }); + + test("handles missing fields", () => { + const policy = extractPolicy({}); + expect(policy.allowTools).toBeUndefined(); + expect(policy.blockTools).toBeUndefined(); + }); + + test("handles partial config (only allowTools)", () => { + const policy = extractPolicy({ allowTools: ["*"] }); + expect(policy.allowTools).toEqual(["*"]); + expect(policy.blockTools).toBeUndefined(); + }); +}); diff --git a/tests/cache/drift.test.ts b/tests/cache/drift.test.ts new file mode 100644 index 0000000..aa16e32 --- /dev/null +++ b/tests/cache/drift.test.ts @@ -0,0 +1,215 @@ +import { describe, expect, test, beforeEach, afterEach } from "bun:test"; +import { detectDrift } from "../../src/cache/drift.ts"; +import { setLogLevel, resetLogLevel } from "../../src/logger/index.ts"; +import type { CachedToolSchema } from "../../src/cache/types.ts"; + +// Suppress log output during tests +beforeEach(() => setLogLevel("silent")); +afterEach(() => resetLogLevel()); + +function makeTool( + name: string, + hash: string, + overrides?: Partial, +): CachedToolSchema { + return { + name, + description: `Description for ${name}`, + inputSchema: { + type: "object", + properties: {}, + }, + hash, + ...overrides, + }; +} + +const EARLIER = "2026-03-09T06:00:00.000Z"; + +// -- detectDrift -- + +describe("detectDrift", () => { + test("no drift when cached and live are identical", () => { + const tools = [makeTool("tool_a", "hash1"), makeTool("tool_b", "hash2")]; + const result = detectDrift("svc", tools, tools, EARLIER); + expect(result.hasDrift).toBe(false); + expect(result.changes).toHaveLength(0); + expect(result.service).toBe("svc"); + }); + + test("detects added tools", () => { + const cached = [makeTool("tool_a", "hash1")]; + const live = [makeTool("tool_a", "hash1"), makeTool("tool_b", "hash2")]; + const result = detectDrift("svc", cached, live, EARLIER); + + expect(result.hasDrift).toBe(true); + expect(result.changes).toHaveLength(1); + expect(result.changes[0]!.tool).toBe("tool_b"); + expect(result.changes[0]!.type).toBe("added"); + }); + + test("detects removed tools", () => { + const cached = [makeTool("tool_a", "hash1"), makeTool("tool_b", "hash2")]; + const 
live = [makeTool("tool_a", "hash1")]; + const result = detectDrift("svc", cached, live, EARLIER); + + expect(result.hasDrift).toBe(true); + expect(result.changes).toHaveLength(1); + expect(result.changes[0]!.tool).toBe("tool_b"); + expect(result.changes[0]!.type).toBe("removed"); + }); + + test("detects changed tools via hash mismatch", () => { + const cached = [makeTool("tool_a", "hash_v1")]; + const live = [makeTool("tool_a", "hash_v2")]; + const result = detectDrift("svc", cached, live, EARLIER); + + expect(result.hasDrift).toBe(true); + expect(result.changes).toHaveLength(1); + expect(result.changes[0]!.tool).toBe("tool_a"); + expect(result.changes[0]!.type).toBe("changed"); + }); + + test("detects multiple changes simultaneously", () => { + const cached = [ + makeTool("kept", "hash1"), + makeTool("removed", "hash2"), + makeTool("changed", "old_hash"), + ]; + const live = [ + makeTool("kept", "hash1"), + makeTool("added", "hash_new"), + makeTool("changed", "new_hash"), + ]; + const result = detectDrift("svc", cached, live, EARLIER); + + expect(result.hasDrift).toBe(true); + expect(result.changes).toHaveLength(3); + + // Sorted by tool name + const types = result.changes.map((c) => `${c.type}:${c.tool}`); + expect(types).toContain("added:added"); + expect(types).toContain("removed:removed"); + expect(types).toContain("changed:changed"); + }); + + test("includes details for changed tools -- params added", () => { + const cached = [ + makeTool("tool", "hash1", { + inputSchema: { + type: "object", + properties: { name: { type: "string" } }, + }, + }), + ]; + const live = [ + makeTool("tool", "hash2", { + inputSchema: { + type: "object", + properties: { + name: { type: "string" }, + email: { type: "string" }, + }, + }, + }), + ]; + const result = detectDrift("svc", cached, live, EARLIER); + + expect(result.hasDrift).toBe(true); + expect(result.changes[0]!.details).toContain("params added: email"); + }); + + test("includes details for changed tools -- params removed", () => { + const cached = [ + makeTool("tool", "hash1", { + inputSchema: { + type: "object", + properties: { + name: { type: "string" }, + legacy: { type: "string" }, + }, + }, + }), + ]; + const live = [ + makeTool("tool", "hash2", { + inputSchema: { + type: "object", + properties: { name: { type: "string" } }, + }, + }), + ]; + const result = detectDrift("svc", cached, live, EARLIER); + + expect(result.hasDrift).toBe(true); + expect(result.changes[0]!.details).toContain("params removed: legacy"); + }); + + test("includes details for changed tools -- required changed", () => { + const cached = [ + makeTool("tool", "hash1", { + inputSchema: { + type: "object", + properties: { name: { type: "string" } }, + required: [], + }, + }), + ]; + const live = [ + makeTool("tool", "hash2", { + inputSchema: { + type: "object", + properties: { name: { type: "string" } }, + required: ["name"], + }, + }), + ]; + const result = detectDrift("svc", cached, live, EARLIER); + + expect(result.hasDrift).toBe(true); + expect(result.changes[0]!.details).toContain("newly required: name"); + }); + + test("includes details for changed tools -- description changed", () => { + const cached = [makeTool("tool", "hash1", { description: "Old desc" })]; + const live = [makeTool("tool", "hash2", { description: "New desc" })]; + const result = detectDrift("svc", cached, live, EARLIER); + + expect(result.hasDrift).toBe(true); + expect(result.changes[0]!.details).toContain("description changed"); + }); + + test("changes are sorted by tool name", () => { + const 
cached = [makeTool("z_tool", "h1"), makeTool("a_tool", "h2")]; + const live = [makeTool("z_tool", "h1_new"), makeTool("a_tool", "h2_new")]; + const result = detectDrift("svc", cached, live, EARLIER); + + expect(result.changes[0]!.tool).toBe("a_tool"); + expect(result.changes[1]!.tool).toBe("z_tool"); + }); + + test("result includes timestamps", () => { + const cached = [makeTool("tool", "hash1")]; + const live = [makeTool("tool", "hash2")]; + const result = detectDrift("svc", cached, live, EARLIER); + + expect(result.cachedAt).toBe(EARLIER); + expect(result.detectedAt).toBeTruthy(); + // detectedAt should be a valid ISO string + expect(new Date(result.detectedAt).toISOString()).toBe(result.detectedAt); + }); + + test("empty cached and empty live -- no drift", () => { + const result = detectDrift("svc", [], [], EARLIER); + expect(result.hasDrift).toBe(false); + expect(result.changes).toHaveLength(0); + }); + + test("empty cached with new live tools -- all added", () => { + const live = [makeTool("tool_a", "h1"), makeTool("tool_b", "h2")]; + const result = detectDrift("svc", [], live, EARLIER); + expect(result.hasDrift).toBe(true); + expect(result.changes).toHaveLength(2); + expect(result.changes.every((c) => c.type === "added")).toBe(true); + }); +}); diff --git a/tests/cache/hash.test.ts b/tests/cache/hash.test.ts new file mode 100644 index 0000000..5515f0f --- /dev/null +++ b/tests/cache/hash.test.ts @@ -0,0 +1,144 @@ +import { describe, expect, test } from "bun:test"; +import { canonicalJson, hashToolSchema } from "../../src/cache/hash.ts"; + +// -- canonicalJson -- + +describe("canonicalJson", () => { + test("sorts object keys alphabetically", () => { + const result = canonicalJson({ z: 1, a: 2, m: 3 }); + expect(result).toBe('{"a":2,"m":3,"z":1}'); + }); + + test("sorts nested object keys recursively", () => { + const result = canonicalJson({ + outer: { z: 1, a: 2 }, + alpha: true, + }); + expect(result).toBe('{"alpha":true,"outer":{"a":2,"z":1}}'); + }); + + test("preserves array order (arrays are not sorted)", () => { + const result = canonicalJson({ items: [3, 1, 2] }); + expect(result).toBe('{"items":[3,1,2]}'); + }); + + test("handles null values", () => { + const result = canonicalJson({ a: null, b: 1 }); + expect(result).toBe('{"a":null,"b":1}'); + }); + + test("handles empty objects", () => { + const result = canonicalJson({}); + expect(result).toBe("{}"); + }); + + test("handles primitive values", () => { + expect(canonicalJson("hello")).toBe('"hello"'); + expect(canonicalJson(42)).toBe("42"); + expect(canonicalJson(true)).toBe("true"); + expect(canonicalJson(null)).toBe("null"); + }); + + test("produces no whitespace", () => { + const result = canonicalJson({ key: { nested: "value" } }); + expect(result).not.toContain(" "); + expect(result).not.toContain("\n"); + }); + + test("identical objects produce identical output regardless of insertion order", () => { + const a = canonicalJson({ x: 1, y: 2, z: 3 }); + const b = canonicalJson({ z: 3, x: 1, y: 2 }); + expect(a).toBe(b); + }); +}); + +// -- hashToolSchema -- + +describe("hashToolSchema", () => { + const baseTool = { + name: "test_tool", + description: "A test tool", + inputSchema: { + type: "object", + properties: { + name: { type: "string" }, + }, + required: ["name"], + }, + }; + + test("returns a 64-character hex string (SHA-256)", async () => { + const hash = await hashToolSchema(baseTool); + expect(hash).toHaveLength(64); + expect(hash).toMatch(/^[0-9a-f]{64}$/); + }); + + test("identical schemas produce identical 
hashes", async () => { + const hash1 = await hashToolSchema(baseTool); + const hash2 = await hashToolSchema({ ...baseTool }); + expect(hash1).toBe(hash2); + }); + + test("different schemas produce different hashes", async () => { + const hash1 = await hashToolSchema(baseTool); + const hash2 = await hashToolSchema({ + ...baseTool, + name: "other_tool", + }); + expect(hash1).not.toBe(hash2); + }); + + test("property order does not affect hash", async () => { + const hash1 = await hashToolSchema({ + name: "tool", + description: "desc", + inputSchema: { type: "object", properties: { a: { type: "string" }, b: { type: "number" } } }, + }); + const hash2 = await hashToolSchema({ + name: "tool", + description: "desc", + inputSchema: { type: "object", properties: { b: { type: "number" }, a: { type: "string" } } }, + }); + expect(hash1).toBe(hash2); + }); + + test("missing description defaults to empty string", async () => { + const hash1 = await hashToolSchema({ + name: "tool", + inputSchema: { type: "object" }, + }); + const hash2 = await hashToolSchema({ + name: "tool", + description: "", + inputSchema: { type: "object" }, + }); + expect(hash1).toBe(hash2); + }); + + test("missing annotations defaults to null", async () => { + const hash1 = await hashToolSchema({ + name: "tool", + description: "desc", + inputSchema: { type: "object" }, + }); + const hash2 = await hashToolSchema({ + name: "tool", + description: "desc", + inputSchema: { type: "object" }, + annotations: undefined, + }); + expect(hash1).toBe(hash2); + }); + + test("different annotations produce different hashes", async () => { + const hash1 = await hashToolSchema({ + ...baseTool, + annotations: { readOnly: true }, + }); + const hash2 = await hashToolSchema({ + ...baseTool, + annotations: { readOnly: false }, + }); + expect(hash1).not.toBe(hash2); + }); +}); diff --git a/tests/cache/storage.test.ts b/tests/cache/storage.test.ts new file mode 100644 index 0000000..8619890 --- /dev/null +++ b/tests/cache/storage.test.ts @@ -0,0 +1,219 @@ +import { describe, expect, test, beforeEach, afterEach } from "bun:test"; +import { join } from "node:path"; +import { mkdtemp, rm } from "node:fs/promises"; +import { tmpdir } from "node:os"; +import { + readCache, + readCacheRaw, + writeCache, + clearCache, + listCachedServices, + isCacheExpired, + getCacheDir, + getCacheFilePath, +} from "../../src/cache/storage.ts"; +import type { CacheMetadata, CachedToolSchema } from "../../src/cache/types.ts"; + +// -- Test setup -- + +let testDir: string; +let origCacheDir: string | undefined; + +beforeEach(async () => { + testDir = await mkdtemp(join(tmpdir(), "mcp2cli-cache-test-")); + origCacheDir = process.env.MCP2CLI_CACHE_DIR; + process.env.MCP2CLI_CACHE_DIR = testDir; +}); + +afterEach(async () => { + if (origCacheDir !== undefined) { + process.env.MCP2CLI_CACHE_DIR = origCacheDir; + } else { + delete process.env.MCP2CLI_CACHE_DIR; + } + await rm(testDir, { recursive: true, force: true }); +}); + +function makeTool(name: string, hash: string = "abc123"): CachedToolSchema { + return { + name, + description: `Test tool ${name}`, + inputSchema: { type: "object", properties: {} }, + hash, + }; +} + +// -- getCacheDir / getCacheFilePath -- + +describe("getCacheDir", () => { + test("uses MCP2CLI_CACHE_DIR when set", () => { + process.env.MCP2CLI_CACHE_DIR = "/custom/cache/dir"; + expect(getCacheDir()).toBe("/custom/cache/dir"); + process.env.MCP2CLI_CACHE_DIR = testDir; // restore for cleanup + }); + + test("falls back to HOME-based path", () => { + delete 
process.env.MCP2CLI_CACHE_DIR; + const home = process.env.HOME; + expect(getCacheDir()).toBe(join(home!, ".cache", "mcp2cli", "schemas")); + process.env.MCP2CLI_CACHE_DIR = testDir; // restore for cleanup + }); +}); + +describe("getCacheFilePath", () => { + test("returns path with service name and .json extension", () => { + const path = getCacheFilePath("n8n"); + expect(path).toBe(join(testDir, "n8n.json")); + }); +}); + +// -- writeCache / readCache -- + +describe("writeCache + readCache", () => { + test("round-trips tools through cache", async () => { + const tools = [makeTool("tool_a"), makeTool("tool_b")]; + await writeCache("test-svc", tools); + + const cached = await readCache("test-svc"); + expect(cached).not.toBeNull(); + expect(cached!.tools).toHaveLength(2); + expect(cached!.tools[0]!.name).toBe("tool_a"); + expect(cached!.tools[1]!.name).toBe("tool_b"); + }); + + test("metadata includes service name and timestamp", async () => { + await writeCache("my-svc", [makeTool("t1")]); + const cached = await readCache("my-svc"); + expect(cached!.metadata.service).toBe("my-svc"); + expect(cached!.metadata.toolCount).toBe(1); + expect(cached!.metadata.cachedAt).toBeTruthy(); + }); + + test("uses custom TTL when provided", async () => { + const customTtl = 60_000; // 1 minute + await writeCache("svc", [makeTool("t")], customTtl); + const cached = await readCache("svc"); + expect(cached!.metadata.ttlMs).toBe(customTtl); + }); + + test("returns null for nonexistent service", async () => { + const result = await readCache("nonexistent"); + expect(result).toBeNull(); + }); + + test("returns null for expired cache", async () => { + // Write with 1ms TTL -- will be expired immediately + await writeCache("expiring", [makeTool("t")], 1); + // Wait a tiny bit to ensure expiry + await new Promise((r) => setTimeout(r, 5)); + const result = await readCache("expiring"); + expect(result).toBeNull(); + }); + + test("readCacheRaw returns even expired entries", async () => { + await writeCache("expired-svc", [makeTool("t")], 1); + await new Promise((r) => setTimeout(r, 5)); + + // readCache should return null (expired) + expect(await readCache("expired-svc")).toBeNull(); + + // readCacheRaw should still return the entry + const raw = await readCacheRaw("expired-svc"); + expect(raw).not.toBeNull(); + expect(raw!.tools[0]!.name).toBe("t"); + }); +}); + +// -- isCacheExpired -- + +describe("isCacheExpired", () => { + test("returns false for fresh cache", () => { + const meta: CacheMetadata = { + service: "test", + cachedAt: new Date().toISOString(), + ttlMs: 24 * 60 * 60 * 1000, + toolCount: 1, + }; + expect(isCacheExpired(meta)).toBe(false); + }); + + test("returns true for old cache", () => { + const staleTime = new Date(Date.now() - 25 * 60 * 60 * 1000); // 25 hours ago + const meta: CacheMetadata = { + service: "test", + cachedAt: staleTime.toISOString(), + ttlMs: 24 * 60 * 60 * 1000, + toolCount: 1, + }; + expect(isCacheExpired(meta)).toBe(true); + }); + + test("respects custom TTL", () => { + const recentTime = new Date(Date.now() - 500); // 500ms ago + const meta: CacheMetadata = { + service: "test", + cachedAt: recentTime.toISOString(), + ttlMs: 1000, // 1 second TTL + toolCount: 1, + }; + expect(isCacheExpired(meta)).toBe(false); + }); +}); + +// -- clearCache -- + +describe("clearCache", () => { + test("clears specific service cache", async () => { + await writeCache("svc-a", [makeTool("t1")]); + await writeCache("svc-b", [makeTool("t2")]); + + const cleared = await clearCache("svc-a"); + 
expect(cleared).toBe(1); + + // svc-a should be gone + expect(await readCache("svc-a")).toBeNull(); + // svc-b should remain + expect(await readCache("svc-b")).not.toBeNull(); + }); + + test("clears all caches when no service specified", async () => { + await writeCache("svc-1", [makeTool("t1")]); + await writeCache("svc-2", [makeTool("t2")]); + await writeCache("svc-3", [makeTool("t3")]); + + const cleared = await clearCache(); + expect(cleared).toBe(3); + + expect(await readCache("svc-1")).toBeNull(); + expect(await readCache("svc-2")).toBeNull(); + expect(await readCache("svc-3")).toBeNull(); + }); + + test("returns 0 when clearing nonexistent service", async () => { + const cleared = await clearCache("nonexistent"); + expect(cleared).toBe(0); + }); + + test("returns 0 when cache directory does not exist", async () => { + process.env.MCP2CLI_CACHE_DIR = join(testDir, "nonexistent-subdir"); + const cleared = await clearCache(); + expect(cleared).toBe(0); + }); +}); + +// -- listCachedServices -- + +describe("listCachedServices", () => { + test("returns empty array when no caches exist", async () => { + const services = await listCachedServices(); + expect(services).toHaveLength(0); + }); + + test("returns service names from cache files", async () => { + await writeCache("alpha", [makeTool("t")]); + await writeCache("beta", [makeTool("t")]); + + const services = await listCachedServices(); + expect(services.sort()).toEqual(["alpha", "beta"]); + }); +}); diff --git a/tests/cli/grep.test.ts b/tests/cli/grep.test.ts new file mode 100644 index 0000000..e7d05b8 --- /dev/null +++ b/tests/cli/grep.test.ts @@ -0,0 +1,147 @@ +import { describe, expect, test, beforeEach, afterEach } from "bun:test"; +import { join } from "node:path"; +import { mkdtemp, rm } from "node:fs/promises"; +import { tmpdir } from "node:os"; +import { writeCache } from "../../src/cache/index.ts"; +import type { CachedToolSchema } from "../../src/cache/index.ts"; + +// -- Test setup -- + +let testDir: string; +let origCacheDir: string | undefined; + +beforeEach(async () => { + testDir = await mkdtemp(join(tmpdir(), "mcp2cli-grep-test-")); + origCacheDir = process.env.MCP2CLI_CACHE_DIR; + process.env.MCP2CLI_CACHE_DIR = testDir; +}); + +afterEach(async () => { + if (origCacheDir !== undefined) { + process.env.MCP2CLI_CACHE_DIR = origCacheDir; + } else { + delete process.env.MCP2CLI_CACHE_DIR; + } + await rm(testDir, { recursive: true, force: true }); +}); + +function makeTool(name: string, description: string): CachedToolSchema { + return { + name, + description, + inputSchema: { type: "object", properties: {} }, + hash: "test-hash", + }; +} + +/** + * Helper to capture stdout from handleGrep. + * Replaces console.log temporarily. 
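+ * Captured lines are joined with "\n" before being returned, so the tests
+ * below assert on substrings of the full printed result.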
+ */
+async function captureGrep(args: string[]): Promise<string> {
+  const { handleGrep } = await import("../../src/cli/commands/grep.ts");
+  const lines: string[] = [];
+  const origLog = console.log;
+  console.log = (...logArgs: unknown[]) => {
+    lines.push(logArgs.map(String).join(" "));
+  };
+  try {
+    await handleGrep(args);
+  } finally {
+    console.log = origLog;
+  }
+  return lines.join("\n");
+}
+
+// -- Tests --
+
+describe("grep command", () => {
+  test("finds tools by name substring", async () => {
+    await writeCache("n8n", [
+      makeTool("list_workflows", "List all workflows"),
+      makeTool("get_workflow", "Get a single workflow"),
+      makeTool("delete_node", "Delete a node"),
+    ]);
+
+    const output = await captureGrep(["workflow"]);
+    expect(output).toContain("n8n.list_workflows");
+    expect(output).toContain("n8n.get_workflow");
+    expect(output).not.toContain("delete_node");
+  });
+
+  test("finds tools by description substring", async () => {
+    await writeCache("svc", [
+      makeTool("tool_a", "Manages database connections"),
+      makeTool("tool_b", "Sends email notifications"),
+    ]);
+
+    const output = await captureGrep(["database"]);
+    expect(output).toContain("svc.tool_a");
+    expect(output).not.toContain("svc.tool_b");
+  });
+
+  test("search is case-insensitive", async () => {
+    await writeCache("svc", [
+      makeTool("CreateWorkflow", "Creates a new WORKFLOW"),
+    ]);
+
+    const output = await captureGrep(["workflow"]);
+    expect(output).toContain("svc.CreateWorkflow");
+
+    const output2 = await captureGrep(["WORKFLOW"]);
+    expect(output2).toContain("svc.CreateWorkflow");
+  });
+
+  test("returns service-qualified names in output", async () => {
+    await writeCache("alpha", [makeTool("my_tool", "Does things")]);
+
+    const output = await captureGrep(["my_tool"]);
+    expect(output).toContain("alpha.my_tool");
+    expect(output).toContain("--");
+    expect(output).toContain("Does things");
+  });
+
+  test("works across multiple services", async () => {
+    await writeCache("svc-a", [
+      makeTool("list_items", "List items from A"),
+    ]);
+    await writeCache("svc-b", [
+      makeTool("list_records", "List records from B"),
+    ]);
+
+    const output = await captureGrep(["list"]);
+    expect(output).toContain("svc-a.list_items");
+    expect(output).toContain("svc-b.list_records");
+  });
+
+  test("returns empty message with no match", async () => {
+    await writeCache("svc", [
+      makeTool("tool_a", "Does A things"),
+    ]);
+
+    const output = await captureGrep(["nonexistent_xyz"]);
+    expect(output).toContain("No tools matching");
+  });
+
+  test("handles no cached schemas gracefully", async () => {
+    // Empty cache dir -- no schemas
+    const output = await captureGrep(["anything"]);
+    expect(output).toContain("No cached schemas found");
+  });
+
+  test("shows usage when no pattern provided", async () => {
+    const output = await captureGrep([]);
+    expect(output).toContain("Usage:");
+    expect(output).toContain("grep");
+  });
+
+  test("matches in both name and description return only one line", async () => {
+    await writeCache("svc", [
+      makeTool("list_workflows", "List all workflows in the system"),
+    ]);
+
+    const output = await captureGrep(["workflow"]);
+    const lines = output.split("\n").filter((l) => l.includes("svc.list_workflows"));
+    expect(lines).toHaveLength(1);
+  });
+});
diff --git a/tests/format/csv.test.ts b/tests/format/csv.test.ts
new file mode 100644
index 0000000..b9783f7
--- /dev/null
+++ b/tests/format/csv.test.ts
@@ -0,0 +1,110 @@
+import { describe, test, expect } from "bun:test";
+import { formatCsv } from 
"../../src/format/csv.ts"; + +describe("formatCsv", () => { + test("null -> empty string", () => { + expect(formatCsv(null)).toBe(""); + }); + + test("undefined -> empty string", () => { + expect(formatCsv(undefined)).toBe(""); + }); + + // --- Single object --- + + test("single object -> header + one data row", () => { + const result = formatCsv({ id: 1, name: "Alice" }); + const lines = result.split("\n"); + expect(lines[0]).toBe("id,name"); + expect(lines[1]).toBe("1,Alice"); + }); + + test("empty object -> empty string", () => { + expect(formatCsv({})).toBe(""); + }); + + // --- Array of objects --- + + test("array of objects -> header + data rows", () => { + const data = [ + { id: 1, name: "Alice" }, + { id: 2, name: "Bob" }, + ]; + const result = formatCsv(data); + const lines = result.split("\n"); + expect(lines[0]).toBe("id,name"); + expect(lines[1]).toBe("1,Alice"); + expect(lines[2]).toBe("2,Bob"); + }); + + test("empty array -> empty string", () => { + expect(formatCsv([])).toBe(""); + }); + + // --- RFC 4180 quoting --- + + test("values with commas are quoted", () => { + const result = formatCsv({ name: "Smith, John" }); + expect(result).toContain('"Smith, John"'); + }); + + test("values with double quotes are double-escaped", () => { + const result = formatCsv({ name: 'say "hello"' }); + expect(result).toContain('"say ""hello"""'); + }); + + test("values with newlines are quoted", () => { + const result = formatCsv({ note: "line1\nline2" }); + expect(result).toContain('"line1\nline2"'); + }); + + test("null values in objects render as empty", () => { + const result = formatCsv({ id: 1, name: null }); + const lines = result.split("\n"); + expect(lines[1]).toBe("1,"); + }); + + // --- Nested objects --- + + test("nested objects are JSON-stringified", () => { + const result = formatCsv({ id: 1, settings: { timeout: 30 } }); + const lines = result.split("\n"); + // JSON contains commas and quotes, so it gets CSV-quoted + expect(lines[1]).toContain('"{""timeout"":30}"'); + }); + + // --- Array of primitives --- + + test("array of primitives -> VALUE column", () => { + const result = formatCsv(["a", "b", "c"]); + const lines = result.split("\n"); + expect(lines[0]).toBe("VALUE"); + expect(lines[1]).toBe("a"); + expect(lines[2]).toBe("b"); + expect(lines[3]).toBe("c"); + }); + + // --- Heterogeneous rows --- + + test("heterogeneous rows fill missing columns with empty", () => { + const data = [ + { id: 1, name: "Alice" }, + { id: 2, email: "bob@test.com" }, + ]; + const result = formatCsv(data); + const lines = result.split("\n"); + expect(lines[0]).toBe("id,name,email"); + expect(lines[1]).toBe("1,Alice,"); + expect(lines[2]).toBe("2,,bob@test.com"); + }); + + // --- Primitive value --- + + test("primitive string -> single value", () => { + expect(formatCsv("hello")).toBe("hello"); + }); + + test("primitive with comma -> quoted", () => { + expect(formatCsv("a,b")).toBe('"a,b"'); + }); +}); diff --git a/tests/format/index.test.ts b/tests/format/index.test.ts new file mode 100644 index 0000000..a94d8c8 --- /dev/null +++ b/tests/format/index.test.ts @@ -0,0 +1,66 @@ +import { describe, test, expect } from "bun:test"; +import { formatOutput, isValidFormat } from "../../src/format/index.ts"; + +describe("formatOutput dispatcher", () => { + const sampleData = [ + { id: 1, name: "Alice" }, + { id: 2, name: "Bob" }, + ]; + + test("json format -> JSON envelope with success wrapper", () => { + const result = formatOutput(sampleData, "json"); + const parsed = JSON.parse(result); + 
expect(parsed.success).toBe(true); + expect(parsed.result).toEqual(sampleData); + }); + + test("table format -> aligned columns", () => { + const result = formatOutput(sampleData, "table"); + expect(result).toContain("ID"); + expect(result).toContain("NAME"); + expect(result).toContain("Alice"); + expect(result).toContain("Bob"); + }); + + test("yaml format -> YAML key-value output", () => { + const result = formatOutput({ id: 1, name: "test" }, "yaml"); + expect(result).toContain("id: 1"); + expect(result).toContain("name: test"); + }); + + test("csv format -> comma-separated with header", () => { + const result = formatOutput(sampleData, "csv"); + const lines = result.split("\n"); + expect(lines[0]).toBe("id,name"); + expect(lines[1]).toBe("1,Alice"); + }); + + test("ndjson format -> one JSON line per array element", () => { + const result = formatOutput(sampleData, "ndjson"); + const lines = result.split("\n"); + expect(lines.length).toBe(2); + expect(JSON.parse(lines[0] as string)).toEqual({ id: 1, name: "Alice" }); + }); + + test("json format with null data", () => { + const result = formatOutput(null, "json"); + expect(JSON.parse(result)).toEqual({ success: true, result: null }); + }); +}); + +describe("isValidFormat", () => { + test("valid formats return true", () => { + expect(isValidFormat("json")).toBe(true); + expect(isValidFormat("table")).toBe(true); + expect(isValidFormat("yaml")).toBe(true); + expect(isValidFormat("csv")).toBe(true); + expect(isValidFormat("ndjson")).toBe(true); + }); + + test("invalid formats return false", () => { + expect(isValidFormat("xml")).toBe(false); + expect(isValidFormat("")).toBe(false); + expect(isValidFormat("JSON")).toBe(false); + expect(isValidFormat("tsv")).toBe(false); + }); +}); diff --git a/tests/format/ndjson.test.ts b/tests/format/ndjson.test.ts new file mode 100644 index 0000000..f4e430b --- /dev/null +++ b/tests/format/ndjson.test.ts @@ -0,0 +1,69 @@ +import { describe, test, expect } from "bun:test"; +import { formatNdjson } from "../../src/format/ndjson.ts"; + +describe("formatNdjson", () => { + test("null -> 'null'", () => { + expect(formatNdjson(null)).toBe("null"); + }); + + test("undefined -> 'null'", () => { + expect(formatNdjson(undefined)).toBe("null"); + }); + + test("single object -> one JSON line", () => { + const result = formatNdjson({ id: 1, name: "test" }); + expect(result).toBe('{"id":1,"name":"test"}'); + // Should be exactly one line + expect(result.split("\n").length).toBe(1); + }); + + test("array of objects -> one JSON line per element", () => { + const data = [ + { id: 1, name: "Alice" }, + { id: 2, name: "Bob" }, + ]; + const result = formatNdjson(data); + const lines = result.split("\n"); + expect(lines.length).toBe(2); + expect(JSON.parse(lines[0] as string)).toEqual({ id: 1, name: "Alice" }); + expect(JSON.parse(lines[1] as string)).toEqual({ id: 2, name: "Bob" }); + }); + + test("empty array -> empty string", () => { + expect(formatNdjson([])).toBe(""); + }); + + test("array of primitives -> one line per element", () => { + const result = formatNdjson([1, "two", true, null]); + const lines = result.split("\n"); + expect(lines.length).toBe(4); + expect(lines[0]).toBe("1"); + expect(lines[1]).toBe('"two"'); + expect(lines[2]).toBe("true"); + expect(lines[3]).toBe("null"); + }); + + test("each line is valid JSON", () => { + const data = [ + { id: 1, nested: { a: [1, 2] } }, + { id: 2, nested: { b: "test" } }, + ]; + const result = formatNdjson(data); + const lines = result.split("\n"); + for (const line of 
lines) { + expect(() => JSON.parse(line as string)).not.toThrow(); + } + }); + + test("primitive number -> stringified", () => { + expect(formatNdjson(42)).toBe("42"); + }); + + test("primitive string -> JSON string", () => { + expect(formatNdjson("hello")).toBe('"hello"'); + }); + + test("boolean -> JSON boolean", () => { + expect(formatNdjson(true)).toBe("true"); + }); +}); diff --git a/tests/format/table.test.ts b/tests/format/table.test.ts new file mode 100644 index 0000000..eeee4ae --- /dev/null +++ b/tests/format/table.test.ts @@ -0,0 +1,121 @@ +import { describe, test, expect } from "bun:test"; +import { formatTable } from "../../src/format/table.ts"; + +describe("formatTable", () => { + test("null -> (empty)", () => { + expect(formatTable(null)).toBe("(empty)"); + }); + + test("undefined -> (empty)", () => { + expect(formatTable(undefined)).toBe("(empty)"); + }); + + test("primitive string -> string value", () => { + expect(formatTable("hello")).toBe("hello"); + }); + + test("primitive number -> stringified", () => { + expect(formatTable(42)).toBe("42"); + }); + + // --- Single object (key/value table) --- + + test("single object -> key/value table with headers", () => { + const result = formatTable({ id: 1, name: "test" }); + const lines = result.split("\n"); + // Header row + expect(lines[0]).toMatch(/KEY\s+VALUE/); + // Separator + expect(lines[1]).toMatch(/^-+\s+-+$/); + // Data rows + expect(lines[2]).toMatch(/id\s+1/); + expect(lines[3]).toMatch(/name\s+test/); + }); + + test("empty object -> (empty)", () => { + expect(formatTable({})).toBe("(empty)"); + }); + + // --- Array of objects (columnar table) --- + + test("array of objects -> columnar table with aligned headers", () => { + const data = [ + { id: 1, name: "Alice", active: true }, + { id: 2, name: "Bob", active: false }, + ]; + const result = formatTable(data); + const lines = result.split("\n"); + + // Header row (uppercase) + expect(lines[0]).toMatch(/ID\s+NAME\s+ACTIVE/); + // Separator + expect(lines[1]).toMatch(/^-+\s+-+\s+-+$/); + // Data rows exist + expect(lines.length).toBe(4); // header + sep + 2 data rows + }); + + test("numeric columns are right-aligned", () => { + const data = [ + { name: "short", count: 5 }, + { name: "longer name", count: 100 }, + ]; + const result = formatTable(data); + const lines = result.split("\n"); + + // The count column values should be right-aligned + // "5" should have more leading spaces than "100" + const dataLine1 = lines[2] as string; + const dataLine2 = lines[3] as string; + // Count column: " 5" vs "100" -- the 5 should be padded + expect(dataLine1).toContain(" 5"); + expect(dataLine2).toContain("100"); + }); + + test("empty array -> (empty)", () => { + expect(formatTable([])).toBe("(empty)"); + }); + + test("array of primitives -> single VALUE column", () => { + const result = formatTable(["a", "b", "c"]); + const lines = result.split("\n"); + expect(lines[0]).toBe("VALUE"); + expect(lines[1]).toMatch(/^-+$/); + expect(lines[2]).toBe("a"); + expect(lines[3]).toBe("b"); + expect(lines[4]).toBe("c"); + }); + + test("nested objects in rows are JSON-stringified", () => { + const data = [{ id: 1, settings: { timeout: 30 } }]; + const result = formatTable(data); + expect(result).toContain('{"timeout":30}'); + }); + + test("null values in rows render as empty string", () => { + const data = [{ id: 1, name: null }]; + const result = formatTable(data); + const lines = result.split("\n"); + // The name column should be empty for the null value + expect(lines[2]).toMatch(/1\s+$/); 
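+    // (Assumes formatTable pads the empty cell out to the column width;
+    // the \s+ anchor in the regex matches that trailing padding.)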
+ }); + + test("long values are truncated", () => { + const longValue = "a".repeat(100); + const data = [{ description: longValue }]; + const result = formatTable(data); + expect(result).toContain("..."); + // Should not contain the full 100-char string + expect(result).not.toContain(longValue); + }); + + test("columns from heterogeneous rows are merged", () => { + const data = [ + { id: 1, name: "Alice" }, + { id: 2, email: "bob@test.com" }, + ]; + const result = formatTable(data); + const lines = result.split("\n"); + // Should have all three columns + expect(lines[0]).toMatch(/ID\s+NAME\s+EMAIL/); + }); +}); diff --git a/tests/format/yaml.test.ts b/tests/format/yaml.test.ts new file mode 100644 index 0000000..c623794 --- /dev/null +++ b/tests/format/yaml.test.ts @@ -0,0 +1,104 @@ +import { describe, test, expect } from "bun:test"; +import { formatYaml } from "../../src/format/yaml.ts"; + +describe("formatYaml", () => { + test("null -> null", () => { + expect(formatYaml(null)).toBe("null"); + }); + + test("undefined -> null", () => { + expect(formatYaml(undefined)).toBe("null"); + }); + + test("boolean true", () => { + expect(formatYaml(true)).toBe("true"); + }); + + test("number", () => { + expect(formatYaml(42)).toBe("42"); + }); + + test("simple string without special chars", () => { + expect(formatYaml("hello")).toBe("hello"); + }); + + test("string with colon is quoted", () => { + expect(formatYaml("key: value")).toBe('"key: value"'); + }); + + test("string with newline is quoted with escape", () => { + expect(formatYaml("line1\nline2")).toBe('"line1\\nline2"'); + }); + + test("string 'true' is quoted (YAML bool ambiguity)", () => { + expect(formatYaml("true")).toBe('"true"'); + }); + + test("string '123' is quoted (YAML number ambiguity)", () => { + expect(formatYaml("123")).toBe('"123"'); + }); + + test("empty string is quoted", () => { + expect(formatYaml("")).toBe('""'); + }); + + // --- Objects --- + + test("flat object", () => { + const result = formatYaml({ id: 1, name: "test" }); + expect(result).toContain("id: 1"); + expect(result).toContain("name: test"); + }); + + test("nested object", () => { + const result = formatYaml({ settings: { timeout: 30 } }); + expect(result).toContain("settings:"); + expect(result).toContain(" timeout: 30"); + }); + + test("empty object -> {}", () => { + expect(formatYaml({})).toBe("{}"); + }); + + // --- Arrays --- + + test("array of primitives", () => { + const result = formatYaml([1, 2, 3]); + expect(result).toContain("- 1"); + expect(result).toContain("- 2"); + expect(result).toContain("- 3"); + }); + + test("empty array -> []", () => { + expect(formatYaml([])).toBe("[]"); + }); + + test("array of objects", () => { + const result = formatYaml([ + { id: 1, name: "Alice" }, + { id: 2, name: "Bob" }, + ]); + expect(result).toContain("- id: 1"); + expect(result).toContain(" name: Alice"); + expect(result).toContain("- id: 2"); + expect(result).toContain(" name: Bob"); + }); + + test("object with array value", () => { + const result = formatYaml({ tags: ["a", "b"] }); + expect(result).toContain("tags:"); + expect(result).toContain(" - a"); + expect(result).toContain(" - b"); + }); + + test("string with double quotes is escaped", () => { + const result = formatYaml('say "hello"'); + expect(result).toBe('"say \\"hello\\""'); + }); + + test("null values in objects", () => { + const result = formatYaml({ id: 1, name: null }); + expect(result).toContain("id: 1"); + expect(result).toContain("name: null"); + }); +}); diff --git 
a/tests/generation/auto-regen.test.ts b/tests/generation/auto-regen.test.ts new file mode 100644 index 0000000..ca60a76 --- /dev/null +++ b/tests/generation/auto-regen.test.ts @@ -0,0 +1,163 @@ +import { afterEach, beforeEach, describe, expect, test } from "bun:test"; +import { mkdtemp, rm } from "node:fs/promises"; +import { join } from "node:path"; +import { tmpdir } from "node:os"; +import { autoRegenerateSkills } from "../../src/generation/auto-regen.ts"; +import type { AccessPolicy } from "../../src/access/types.ts"; + +describe("autoRegenerateSkills", () => { + let tmpDir: string; + + beforeEach(async () => { + tmpDir = await mkdtemp(join(tmpdir(), "mcp2cli-regen-")); + }); + + afterEach(async () => { + await rm(tmpDir, { recursive: true, force: true }); + }); + + test("regenerates skill files with tool list", async () => { + const tools = [ + { name: "list_items", description: "List all items" }, + { name: "create_item", description: "Create an item" }, + { name: "delete_item", description: "Delete an item" }, + ]; + + const result = await autoRegenerateSkills("test-service", tools, {}, tmpDir); + + expect(result.regenerated).toBe(true); + expect(result.toolCount).toBe(3); + expect(result.filesWritten.length).toBeGreaterThan(0); + expect(result.error).toBeUndefined(); + + // Verify SKILL.md was written + const skillFile = Bun.file(join(tmpDir, "SKILL.md")); + expect(await skillFile.exists()).toBe(true); + const content = await skillFile.text(); + expect(content).toContain("list_items"); + expect(content).toContain("create_item"); + expect(content).toContain("delete_item"); + }); + + test("filters tools by access policy (blockTools)", async () => { + const tools = [ + { name: "list_items", description: "List items" }, + { name: "admin_reset", description: "Reset admin" }, + { name: "admin_delete", description: "Delete admin" }, + ]; + + const policy: AccessPolicy = { + blockTools: ["admin_*"], + }; + + const result = await autoRegenerateSkills("test-service", tools, policy, tmpDir); + + expect(result.regenerated).toBe(true); + expect(result.toolCount).toBe(1); + + const content = await Bun.file(join(tmpDir, "SKILL.md")).text(); + expect(content).toContain("list_items"); + expect(content).not.toContain("admin_reset"); + expect(content).not.toContain("admin_delete"); + }); + + test("filters tools by access policy (allowTools)", async () => { + const tools = [ + { name: "list_items", description: "List items" }, + { name: "get_item", description: "Get item" }, + { name: "delete_item", description: "Delete item" }, + ]; + + const policy: AccessPolicy = { + allowTools: ["list_*", "get_*"], + }; + + const result = await autoRegenerateSkills("test-service", tools, policy, tmpDir); + + expect(result.regenerated).toBe(true); + expect(result.toolCount).toBe(2); + + const content = await Bun.file(join(tmpDir, "SKILL.md")).text(); + expect(content).toContain("list_items"); + expect(content).toContain("get_item"); + expect(content).not.toContain("delete_item"); + }); + + test("skips regeneration when all tools are blocked", async () => { + const tools = [ + { name: "admin_reset", description: "Reset admin" }, + ]; + + const policy: AccessPolicy = { + blockTools: ["admin_*"], + }; + + const result = await autoRegenerateSkills("test-service", tools, policy, tmpDir); + + expect(result.regenerated).toBe(false); + expect(result.toolCount).toBe(0); + expect(result.filesWritten).toHaveLength(0); + }); + + test("preserves manual sections from existing skill file", async () => { + // Create an existing 
SKILL.md with manual content
+    const existingContent = [
+      "---",
+      "name: test-service",
+      "description: MCP tools for test-service",
+      "triggers:",
+      "  - test-service",
+      "---",
+      "",
+      "# test-service",
+      "",
+      "<!-- AUTO:START -->",
+      "",
+      "## Quick Reference",
+      "",
+      "| Tool | Description |",
+      "|------|-------------|",
+      "| old_tool | Old tool desc |",
+      "",
+      "<!-- AUTO:END -->",
+      "",
+      "## Notes",
+      "",
+      "<!-- MANUAL:START -->",
+      "IMPORTANT: Always use list_items before create_item.",
+      "This is critical user documentation.",
+      "<!-- MANUAL:END -->",
+      "",
+    ].join("\n");
+
+    await Bun.write(join(tmpDir, "SKILL.md"), existingContent);
+
+    const tools = [
+      { name: "list_items", description: "List items" },
+      { name: "create_item", description: "Create item" },
+    ];
+
+    const result = await autoRegenerateSkills("test-service", tools, {}, tmpDir);
+
+    expect(result.regenerated).toBe(true);
+    expect(result.manualSectionsPreserved).toBe(1);
+
+    const content = await Bun.file(join(tmpDir, "SKILL.md")).text();
+    // New tools present
+    expect(content).toContain("list_items");
+    expect(content).toContain("create_item");
+    // Old tool removed
+    expect(content).not.toContain("old_tool");
+    // Manual content preserved
+    expect(content).toContain("IMPORTANT: Always use list_items before create_item.");
+    expect(content).toContain("This is critical user documentation.");
+  });
+
+  test("handles empty tool list", async () => {
+    const result = await autoRegenerateSkills("test-service", [], {}, tmpDir);
+
+    expect(result.regenerated).toBe(false);
+    expect(result.toolCount).toBe(0);
+    expect(result.filesWritten).toHaveLength(0);
+  });
+});
diff --git a/tests/generation/diff.test.ts b/tests/generation/diff.test.ts
new file mode 100644
index 0000000..38764c5
--- /dev/null
+++ b/tests/generation/diff.test.ts
@@ -0,0 +1,231 @@
+import { describe, expect, test } from "bun:test";
+import {
+  parseExistingTools,
+  computeSkillDiff,
+  formatDiffPreview,
+} from "../../src/generation/diff.ts";
+
+// -- parseExistingTools --
+
+describe("parseExistingTools", () => {
+  test("extracts tools from a quick reference table", () => {
+    const content = [
+      "# my-service",
+      "",
+      "## Quick Reference",
+      "",
+      "| Tool | Description |",
+      "|------|-------------|",
+      "| list_items | List all items |",
+      "| create_item | Create a new item |",
+      "| delete_item | Delete an item |",
+      "",
+      "## Usage",
+    ].join("\n");
+
+    const tools = parseExistingTools(content);
+    expect(tools).toHaveLength(3);
+    expect(tools[0]!.name).toBe("list_items");
+    expect(tools[0]!.description).toBe("List all items");
+    expect(tools[1]!.name).toBe("create_item");
+    expect(tools[2]!.name).toBe("delete_item");
+  });
+
+  test("returns empty array when no table exists", () => {
+    const content = "# Just a heading\n\nSome text.";
+    const tools = parseExistingTools(content);
+    expect(tools).toHaveLength(0);
+  });
+
+  test("handles table with extra whitespace in cells", () => {
+    const content = [
+      "| Tool | Description |",
+      "|------|-------------|",
+      "|   spaced_tool   |   Has spaces around it   |",
+    ].join("\n");
+
+    const tools = parseExistingTools(content);
+    expect(tools).toHaveLength(1);
+    expect(tools[0]!.name).toBe("spaced_tool");
+    expect(tools[0]!.description).toBe("Has spaces around it");
+  });
+
+  test("stops parsing when table ends (non-table row breaks it)", () => {
+    const content = [
+      "## Quick Reference",
+      "",
+      "| Tool | Description |",
+      "|------|-------------|",
+      "| tool_a | Description A |",
+      "| tool_b | Description B |",
+      "",
+      "This is not a table row.",
+    ].join("\n");
+
+    const tools = 
parseExistingTools(content); + expect(tools).toHaveLength(2); + expect(tools[0]!.name).toBe("tool_a"); + expect(tools[1]!.name).toBe("tool_b"); + }); +}); + +// -- computeSkillDiff -- + +describe("computeSkillDiff", () => { + test("detects added tools", () => { + const existing = [ + { name: "tool_a", description: "Tool A" }, + ]; + const newTools = [ + { name: "tool_a", description: "Tool A" }, + { name: "tool_b", description: "Tool B" }, + ]; + + const diff = computeSkillDiff("test", existing, newTools); + expect(diff.hasChanges).toBe(true); + expect(diff.added).toHaveLength(1); + expect(diff.added[0]!.tool).toBe("tool_b"); + expect(diff.removed).toHaveLength(0); + expect(diff.modified).toHaveLength(0); + }); + + test("detects removed tools", () => { + const existing = [ + { name: "tool_a", description: "Tool A" }, + { name: "tool_b", description: "Tool B" }, + ]; + const newTools = [ + { name: "tool_a", description: "Tool A" }, + ]; + + const diff = computeSkillDiff("test", existing, newTools); + expect(diff.hasChanges).toBe(true); + expect(diff.removed).toHaveLength(1); + expect(diff.removed[0]!.tool).toBe("tool_b"); + expect(diff.added).toHaveLength(0); + }); + + test("detects modified tools (description changed)", () => { + const existing = [ + { name: "tool_a", description: "Old description" }, + ]; + const newTools = [ + { name: "tool_a", description: "New description" }, + ]; + + const diff = computeSkillDiff("test", existing, newTools); + expect(diff.hasChanges).toBe(true); + expect(diff.modified).toHaveLength(1); + expect(diff.modified[0]!.tool).toBe("tool_a"); + expect(diff.modified[0]!.details).toContain("Old description"); + expect(diff.modified[0]!.details).toContain("New description"); + }); + + test("reports no changes for identical tool lists", () => { + const tools = [ + { name: "tool_a", description: "Tool A" }, + { name: "tool_b", description: "Tool B" }, + ]; + + const diff = computeSkillDiff("test", tools, tools); + expect(diff.hasChanges).toBe(false); + expect(diff.added).toHaveLength(0); + expect(diff.removed).toHaveLength(0); + expect(diff.modified).toHaveLength(0); + }); + + test("detects mixed changes (add + remove + modify)", () => { + const existing = [ + { name: "tool_a", description: "Tool A" }, + { name: "tool_b", description: "Old B" }, + { name: "tool_c", description: "Tool C" }, + ]; + const newTools = [ + { name: "tool_a", description: "Tool A" }, + { name: "tool_b", description: "New B" }, + { name: "tool_d", description: "Tool D" }, + ]; + + const diff = computeSkillDiff("test", existing, newTools); + expect(diff.hasChanges).toBe(true); + expect(diff.added).toHaveLength(1); + expect(diff.added[0]!.tool).toBe("tool_d"); + expect(diff.removed).toHaveLength(1); + expect(diff.removed[0]!.tool).toBe("tool_c"); + expect(diff.modified).toHaveLength(1); + expect(diff.modified[0]!.tool).toBe("tool_b"); + }); + + test("includes correct counts", () => { + const existing = [ + { name: "tool_a", description: "A" }, + { name: "tool_b", description: "B" }, + ]; + const newTools = [ + { name: "tool_a", description: "A" }, + { name: "tool_c", description: "C" }, + { name: "tool_d", description: "D" }, + ]; + + const diff = computeSkillDiff("svc", existing, newTools); + expect(diff.existingToolCount).toBe(2); + expect(diff.newToolCount).toBe(3); + expect(diff.service).toBe("svc"); + }); +}); + +// -- formatDiffPreview -- + +describe("formatDiffPreview", () => { + test("formats added tools with + prefix", () => { + const diff = computeSkillDiff( + "test", + [], + [{ 
name: "new_tool", description: "New" }],
+    );
+    const output = formatDiffPreview(diff);
+    expect(output).toContain("+ new_tool");
+    expect(output).toContain("Added (1)");
+  });
+
+  test("formats removed tools with - prefix", () => {
+    const diff = computeSkillDiff(
+      "test",
+      [{ name: "old_tool", description: "Old" }],
+      [],
+    );
+    const output = formatDiffPreview(diff);
+    expect(output).toContain("- old_tool");
+    expect(output).toContain("Removed (1)");
+  });
+
+  test("formats modified tools with ~ prefix", () => {
+    const diff = computeSkillDiff(
+      "test",
+      [{ name: "tool_a", description: "Old desc" }],
+      [{ name: "tool_a", description: "New desc" }],
+    );
+    const output = formatDiffPreview(diff);
+    expect(output).toContain("~ tool_a");
+    expect(output).toContain("Modified (1)");
+  });
+
+  test("reports no changes when identical", () => {
+    const tools = [{ name: "tool_a", description: "A" }];
+    const diff = computeSkillDiff("test", tools, tools);
+    const output = formatDiffPreview(diff);
+    expect(output).toContain("No changes detected");
+  });
+
+  test("includes tool count summary", () => {
+    const diff = computeSkillDiff(
+      "my-service",
+      [{ name: "a", description: "A" }],
+      [{ name: "a", description: "A" }, { name: "b", description: "B" }],
+    );
+    const output = formatDiffPreview(diff);
+    expect(output).toContain("Existing: 1 tools");
+    expect(output).toContain("New: 2 tools");
+    expect(output).toContain("my-service");
+  });
+});
diff --git a/tests/generation/preserve.test.ts b/tests/generation/preserve.test.ts
new file mode 100644
index 0000000..817a4f8
--- /dev/null
+++ b/tests/generation/preserve.test.ts
@@ -0,0 +1,217 @@
+import { describe, expect, test } from "bun:test";
+import {
+  extractManualSections,
+  injectManualSections,
+  createManualPlaceholder,
+} from "../../src/generation/preserve.ts";
+
+// -- extractManualSections --
+
+describe("extractManualSections", () => {
+  test("extracts a single manual section", () => {
+    const content = [
+      "# Header",
+      "",
+      "<!-- MANUAL:START -->",
+      "My custom notes",
+      "<!-- MANUAL:END -->",
+      "",
+      "Footer",
+    ].join("\n");
+
+    const sections = extractManualSections(content);
+    expect(sections).toHaveLength(1);
+    expect(sections[0]!.content).toContain("My custom notes");
+    expect(sections[0]!.content).toContain("<!-- MANUAL:START -->");
+    expect(sections[0]!.content).toContain("<!-- MANUAL:END -->");
+    expect(sections[0]!.index).toBe(0);
+  });
+
+  test("extracts multiple manual sections", () => {
+    const content = [
+      "## Notes",
+      "<!-- MANUAL:START -->",
+      "Section 1 notes",
+      "<!-- MANUAL:END -->",
+      "",
+      "## Examples",
+      "<!-- MANUAL:START -->",
+      "Section 2 examples",
+      "<!-- MANUAL:END -->",
+    ].join("\n");
+
+    const sections = extractManualSections(content);
+    expect(sections).toHaveLength(2);
+    expect(sections[0]!.content).toContain("Section 1 notes");
+    expect(sections[0]!.index).toBe(0);
+    expect(sections[1]!.content).toContain("Section 2 examples");
+    expect(sections[1]!.index).toBe(1);
+  });
+
+  test("returns empty array when no manual sections exist", () => {
+    const content = [
+      "# Just a normal file",
+      "",
+      "<!-- AUTO:START -->",
+      "Auto content",
+      "<!-- AUTO:END -->",
+    ].join("\n");
+
+    const sections = extractManualSections(content);
+    expect(sections).toHaveLength(0);
+  });
+
+  test("identifies preceding headings", () => {
+    const content = [
+      "## Custom Notes",
+      "",
+      "<!-- MANUAL:START -->",
+      "User content here",
+      "<!-- MANUAL:END -->",
+    ].join("\n");
+
+    const sections = extractManualSections(content);
+    expect(sections).toHaveLength(1);
+    expect(sections[0]!.precedingHeading).toBe("## Custom Notes");
+  });
+
+  test("handles tolerant marker whitespace", () => {
+    const content = [
+      "<!--   MANUAL:START   -->",
+      "Content with spaces in markers",
+      "<!--  MANUAL:END  -->",
+    ].join("\n");
+
+    const sections = extractManualSections(content);
+    expect(sections).toHaveLength(1);
+    expect(sections[0]!.content).toContain("Content with spaces in markers");
+  });
+
+  test("preserves multi-line manual content", () => {
+    const content = [
+      "<!-- MANUAL:START -->",
+      "Line 1",
+      "Line 2",
+      "Line 3",
+      "",
+      "With blank lines too",
+      "<!-- MANUAL:END -->",
+    ].join("\n");
+
+    const sections = extractManualSections(content);
+    expect(sections).toHaveLength(1);
+    expect(sections[0]!.content).toContain("Line 1");
+    expect(sections[0]!.content).toContain("Line 3");
+    expect(sections[0]!.content).toContain("With blank lines too");
+  });
+});
+
+// -- injectManualSections --
+
+describe("injectManualSections", () => {
+  test("replaces manual placeholders with preserved content", () => {
+    const generated = [
+      "# Service",
+      "",
+      "<!-- AUTO:START -->",
+      "Auto content",
+      "<!-- AUTO:END -->",
+      "",
+      "## Notes",
+      "",
+      "<!-- MANUAL:START -->",
+      "<!-- Add your custom notes here -->",
+      "<!-- MANUAL:END -->",
+    ].join("\n");
+
+    const preserved = [
+      {
+        content: "<!-- MANUAL:START -->\nMy important notes\n<!-- MANUAL:END -->",
+        precedingHeading: "## Notes",
+        index: 0,
+      },
+    ];
+
+    const result = injectManualSections(generated, preserved);
+    expect(result).toContain("My important notes");
+    expect(result).not.toContain("Add your custom notes here");
+    expect(result).toContain("Auto content");
+  });
+
+  test("appends manual sections when no placeholders exist", () => {
+    const generated = [
+      "# Service",
+      "",
+      "<!-- AUTO:START -->",
+      "Auto content",
+      "<!-- AUTO:END -->",
+    ].join("\n");
+
+    const preserved = [
+      {
+        content: "<!-- MANUAL:START -->\nPreserved notes\n<!-- MANUAL:END -->",
+        precedingHeading: "## Notes",
+        index: 0,
+      },
+    ];
+
+    const result = injectManualSections(generated, preserved);
+    expect(result).toContain("Preserved notes");
+    expect(result).toContain("Auto content");
+  });
+
+  test("returns generated content unchanged when no sections to inject", () => {
+    const generated = "# Just some content\n\nNothing special.";
+    const result = injectManualSections(generated, []);
+    expect(result).toBe(generated);
+  });
+
+  test("handles multiple preserved sections with multiple placeholders", () => {
+    const generated = [
+      "<!-- MANUAL:START -->",
+      "placeholder 1",
+      "<!-- MANUAL:END -->",
+      "",
+      "<!-- MANUAL:START -->",
+      "placeholder 2",
+      "<!-- MANUAL:END -->",
+    ].join("\n");
+
+    const preserved = [
+      {
+        content: "<!-- MANUAL:START -->\nFirst section\n<!-- MANUAL:END -->",
+        precedingHeading: "",
+        index: 0,
+      },
+      {
+        content: "<!-- MANUAL:START -->\nSecond section\n<!-- MANUAL:END -->",
+        precedingHeading: "",
+        index: 1,
+      },
+    ];
+
+    const result = injectManualSections(generated, preserved);
+    expect(result).toContain("First section");
+    expect(result).toContain("Second section");
+    expect(result).not.toContain("placeholder 1");
+    expect(result).not.toContain("placeholder 2");
+  });
+});
+
+// -- createManualPlaceholder --
+
+describe("createManualPlaceholder", () => {
+  test("creates placeholder with label", () => {
+    const placeholder = createManualPlaceholder("Custom Notes");
+    expect(placeholder).toContain("## Custom Notes");
+    expect(placeholder).toContain("<!-- MANUAL:START -->");
+    expect(placeholder).toContain("<!-- MANUAL:END -->");
+  });
+
+  test("creates placeholder without label", () => {
+    const placeholder = createManualPlaceholder();
+    expect(placeholder).not.toContain("##");
+    expect(placeholder).toContain("<!-- MANUAL:START -->");
+    expect(placeholder).toContain("<!-- MANUAL:END -->");
+  });
+});
diff --git a/tests/invocation/parse.test.ts b/tests/invocation/parse.test.ts
index 091bc50..7b6228e 100644
--- a/tests/invocation/parse.test.ts
+++ b/tests/invocation/parse.test.ts
@@ -191,4 +191,85 @@ describe("parseToolCallArgs", () => {
       expect(result.value.params).toEqual({});
     }
   });
+
+  // --- Phase 18: --format flag ---
+
+  test("no --format flag defaults to json", () => {
+    const result = parseToolCallArgs(["n8n", "list"]); 
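+    // Valid values, per the dispatcher tests in tests/format/index.test.ts:
+    // json (default), table, yaml, csv, ndjson.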
+ expect(result.ok).toBe(true); + if (result.ok) { + expect(result.value.format).toBe("json"); + } + }); + + test("--format table sets format (space syntax)", () => { + const result = parseToolCallArgs(["n8n", "list", "--format", "table"]); + expect(result.ok).toBe(true); + if (result.ok) { + expect(result.value.format).toBe("table"); + } + }); + + test("--format=yaml sets format (equals syntax)", () => { + const result = parseToolCallArgs(["n8n", "list", "--format=yaml"]); + expect(result.ok).toBe(true); + if (result.ok) { + expect(result.value.format).toBe("yaml"); + } + }); + + test("--format csv works", () => { + const result = parseToolCallArgs(["n8n", "list", "--format", "csv"]); + expect(result.ok).toBe(true); + if (result.ok) { + expect(result.value.format).toBe("csv"); + } + }); + + test("--format ndjson works", () => { + const result = parseToolCallArgs(["n8n", "list", "--format=ndjson"]); + expect(result.ok).toBe(true); + if (result.ok) { + expect(result.value.format).toBe("ndjson"); + } + }); + + test("--format with invalid value returns error", () => { + const result = parseToolCallArgs(["n8n", "list", "--format", "xml"]); + expect(result.ok).toBe(false); + if (!result.ok) { + expect(result.error.code).toBe("INPUT_VALIDATION_ERROR"); + expect(result.error.message).toContain("xml"); + expect(result.error.message).toContain("Valid formats"); + } + }); + + test("--format= with invalid value returns error (equals syntax)", () => { + const result = parseToolCallArgs(["n8n", "list", "--format=tsv"]); + expect(result.ok).toBe(false); + if (!result.ok) { + expect(result.error.code).toBe("INPUT_VALIDATION_ERROR"); + expect(result.error.message).toContain("tsv"); + } + }); + + test("--format combined with other flags", () => { + const result = parseToolCallArgs([ + "n8n", + "list", + "--format", + "table", + "--params", + "{}", + "--fields=id,name", + "--dry-run", + ]); + expect(result.ok).toBe(true); + if (result.ok) { + expect(result.value.format).toBe("table"); + expect(result.value.dryRun).toBe(true); + expect(result.value.fields).toEqual(["id", "name"]); + expect(result.value.params).toEqual({}); + } + }); }); diff --git a/tests/invocation/validate.test.ts b/tests/invocation/validate.test.ts index 0202f38..d2c4e6e 100644 --- a/tests/invocation/validate.test.ts +++ b/tests/invocation/validate.test.ts @@ -10,6 +10,7 @@ describe("validateToolCallInputs", () => { params: {}, dryRun: false, fields: [], + format: "json", }; const result = validateToolCallInputs(parsed); expect(result.valid).toBe(true); @@ -22,6 +23,7 @@ describe("validateToolCallInputs", () => { params: {}, dryRun: false, fields: [], + format: "json", }; const result = validateToolCallInputs(parsed); expect(result.valid).toBe(false); @@ -37,6 +39,7 @@ describe("validateToolCallInputs", () => { params: {}, dryRun: false, fields: [], + format: "json", }; const result = validateToolCallInputs(parsed); expect(result.valid).toBe(false); @@ -52,6 +55,7 @@ describe("validateToolCallInputs", () => { params: { "../key": "value" }, dryRun: false, fields: [], + format: "json", }; const result = validateToolCallInputs(parsed); expect(result.valid).toBe(false); @@ -67,6 +71,7 @@ describe("validateToolCallInputs", () => { params: { name: "val\x00" }, dryRun: false, fields: [], + format: "json", }; const result = validateToolCallInputs(parsed); expect(result.valid).toBe(false); @@ -85,6 +90,7 @@ describe("validateToolCallInputs", () => { }, dryRun: false, fields: [], + format: "json", }; const result = validateToolCallInputs(parsed); 
expect(result.valid).toBe(true); @@ -99,6 +105,7 @@ describe("validateToolCallInputs", () => { }, dryRun: false, fields: [], + format: "json", }; const result = validateToolCallInputs(parsed); expect(result.valid).toBe(false); @@ -119,6 +126,7 @@ describe("validateToolCallInputs", () => { }, dryRun: false, fields: [], + format: "json", }; const result = validateToolCallInputs(parsed); expect(result.valid).toBe(true); diff --git a/tests/resilience/circuit-breaker.test.ts b/tests/resilience/circuit-breaker.test.ts new file mode 100644 index 0000000..99cb458 --- /dev/null +++ b/tests/resilience/circuit-breaker.test.ts @@ -0,0 +1,457 @@ +import { describe, test, expect, beforeEach, afterEach } from "bun:test"; +import { join } from "node:path"; +import { mkdtemp, rm, readdir } from "node:fs/promises"; +import { tmpdir } from "node:os"; +import { + loadState, + saveState, + clearState, + resolveState, + recordFailure, + recordSuccess, + shouldAttemptHttp, + getCircuitBreakerDir, + getStateFilePath, +} from "../../src/resilience/index.ts"; +import type { + CircuitBreakerState, + CircuitBreakerConfig, +} from "../../src/resilience/index.ts"; + +// -- Test setup: use temp dir for circuit breaker state -- + +let testDir: string; +let origCacheDir: string | undefined; + +beforeEach(async () => { + testDir = await mkdtemp(join(tmpdir(), "mcp2cli-cb-test-")); + origCacheDir = process.env.MCP2CLI_CACHE_DIR; + // Point cache dir to testDir/schemas so circuit-breaker resolves to testDir/circuit-breaker + process.env.MCP2CLI_CACHE_DIR = join(testDir, "schemas"); +}); + +afterEach(async () => { + if (origCacheDir !== undefined) { + process.env.MCP2CLI_CACHE_DIR = origCacheDir; + } else { + delete process.env.MCP2CLI_CACHE_DIR; + } + await rm(testDir, { recursive: true, force: true }); +}); + +// -- Path resolution -- + +describe("getCircuitBreakerDir", () => { + test("resolves to sibling of MCP2CLI_CACHE_DIR", () => { + process.env.MCP2CLI_CACHE_DIR = "/custom/cache/schemas"; + expect(getCircuitBreakerDir()).toBe("/custom/cache/circuit-breaker"); + process.env.MCP2CLI_CACHE_DIR = join(testDir, "schemas"); + }); + + test("falls back to HOME-based path", () => { + delete process.env.MCP2CLI_CACHE_DIR; + const home = process.env.HOME; + expect(getCircuitBreakerDir()).toBe( + join(home!, ".cache", "mcp2cli", "circuit-breaker"), + ); + process.env.MCP2CLI_CACHE_DIR = join(testDir, "schemas"); + }); +}); + +describe("getStateFilePath", () => { + test("returns path with service name and .json extension", () => { + const path = getStateFilePath("n8n"); + expect(path).toEndWith("circuit-breaker/n8n.json"); + }); +}); + +// -- Load / Save / Clear -- + +describe("loadState", () => { + test("returns initial closed state for unknown service", async () => { + const state = await loadState("nonexistent"); + expect(state.state).toBe("closed"); + expect(state.failureCount).toBe(0); + expect(state.lastFailureAt).toBeNull(); + expect(state.openedAt).toBeNull(); + expect(state.lastSuccessAt).toBeNull(); + }); + + test("returns initial state for corrupted file", async () => { + const filePath = getStateFilePath("corrupt"); + const { mkdir: mkdirFs } = await import("node:fs/promises"); + const { dirname } = await import("node:path"); + await mkdirFs(dirname(filePath), { recursive: true }); + await Bun.write(filePath, "not json"); + + const state = await loadState("corrupt"); + expect(state.state).toBe("closed"); + expect(state.failureCount).toBe(0); + }); + + test("returns initial state for structurally invalid file", async () 
=> { + const filePath = getStateFilePath("invalid"); + const { mkdir: mkdirFs } = await import("node:fs/promises"); + const { dirname } = await import("node:path"); + await mkdirFs(dirname(filePath), { recursive: true }); + await Bun.write(filePath, JSON.stringify({ random: "data" })); + + const state = await loadState("invalid"); + expect(state.state).toBe("closed"); + }); +}); + +describe("saveState + loadState", () => { + test("round-trips state through disk", async () => { + const state: CircuitBreakerState = { + state: "open", + failureCount: 5, + lastFailureAt: "2026-03-09T10:00:00.000Z", + openedAt: "2026-03-09T10:00:00.000Z", + lastSuccessAt: "2026-03-09T09:00:00.000Z", + }; + + await saveState("test-svc", state); + const loaded = await loadState("test-svc"); + + expect(loaded.state).toBe("open"); + expect(loaded.failureCount).toBe(5); + expect(loaded.openedAt).toBe("2026-03-09T10:00:00.000Z"); + expect(loaded.lastSuccessAt).toBe("2026-03-09T09:00:00.000Z"); + }); +}); + +describe("clearState", () => { + test("removes state file", async () => { + await saveState("clear-me", { + state: "open", + failureCount: 3, + lastFailureAt: null, + openedAt: null, + lastSuccessAt: null, + }); + + // Verify it exists + const before = await loadState("clear-me"); + expect(before.state).toBe("open"); + + await clearState("clear-me"); + + // Should return initial state now + const after = await loadState("clear-me"); + expect(after.state).toBe("closed"); + expect(after.failureCount).toBe(0); + }); + + test("does not throw for nonexistent service", async () => { + // Should not throw + await clearState("never-existed"); + }); +}); + +// -- resolveState -- + +describe("resolveState", () => { + const config: CircuitBreakerConfig = { + failureThreshold: 5, + cooldownMs: 60_000, + }; + + test("returns closed for closed state", () => { + const state: CircuitBreakerState = { + state: "closed", + failureCount: 2, + lastFailureAt: null, + openedAt: null, + lastSuccessAt: null, + }; + expect(resolveState(state, config)).toBe("closed"); + }); + + test("returns half-open for half-open state", () => { + const state: CircuitBreakerState = { + state: "half-open", + failureCount: 5, + lastFailureAt: null, + openedAt: null, + lastSuccessAt: null, + }; + expect(resolveState(state, config)).toBe("half-open"); + }); + + test("returns open when cooldown has not elapsed", () => { + const state: CircuitBreakerState = { + state: "open", + failureCount: 5, + lastFailureAt: null, + openedAt: new Date().toISOString(), // just now + lastSuccessAt: null, + }; + expect(resolveState(state, config)).toBe("open"); + }); + + test("returns half-open when cooldown has elapsed", () => { + const pastTime = new Date(Date.now() - 120_000).toISOString(); // 2 min ago + const state: CircuitBreakerState = { + state: "open", + failureCount: 5, + lastFailureAt: null, + openedAt: pastTime, + lastSuccessAt: null, + }; + expect(resolveState(state, config)).toBe("half-open"); + }); + + test("returns open when openedAt is null (edge case)", () => { + const state: CircuitBreakerState = { + state: "open", + failureCount: 5, + lastFailureAt: null, + openedAt: null, + lastSuccessAt: null, + }; + // No openedAt means we can't calculate cooldown, stays open + expect(resolveState(state, config)).toBe("open"); + }); +}); + +// -- recordFailure -- + +describe("recordFailure", () => { + const config: CircuitBreakerConfig = { + failureThreshold: 3, + cooldownMs: 60_000, + }; + + test("increments failure count in closed state", async () => { + const state = 
await recordFailure("fail-test", config); + expect(state.failureCount).toBe(1); + expect(state.state).toBe("closed"); + expect(state.lastFailureAt).not.toBeNull(); + }); + + test("opens circuit after reaching threshold", async () => { + await recordFailure("threshold-test", config); + await recordFailure("threshold-test", config); + const state = await recordFailure("threshold-test", config); + + expect(state.failureCount).toBe(3); + expect(state.state).toBe("open"); + expect(state.openedAt).not.toBeNull(); + }); + + test("reopens circuit on half-open probe failure", async () => { + // Manually set up a half-open state + await saveState("probe-fail", { + state: "half-open", + failureCount: 5, + lastFailureAt: null, + openedAt: null, + lastSuccessAt: null, + }); + + const state = await recordFailure("probe-fail", config); + expect(state.state).toBe("open"); + expect(state.openedAt).not.toBeNull(); + }); + + test("persists state to disk", async () => { + await recordFailure("persist-test", config); + // Read directly from disk to verify persistence + const loaded = await loadState("persist-test"); + expect(loaded.failureCount).toBe(1); + }); +}); + +// -- recordSuccess -- + +describe("recordSuccess", () => { + test("resets failure count and closes circuit", async () => { + // Set up an open circuit + await saveState("success-test", { + state: "open", + failureCount: 5, + lastFailureAt: "2026-03-09T10:00:00.000Z", + openedAt: "2026-03-09T10:00:00.000Z", + lastSuccessAt: null, + }); + + const state = await recordSuccess("success-test"); + expect(state.state).toBe("closed"); + expect(state.failureCount).toBe(0); + expect(state.lastSuccessAt).not.toBeNull(); + expect(state.openedAt).toBeNull(); + }); + + test("keeps closed state closed", async () => { + const state = await recordSuccess("already-closed"); + expect(state.state).toBe("closed"); + expect(state.failureCount).toBe(0); + }); + + test("closes half-open circuit on success", async () => { + await saveState("half-open-success", { + state: "half-open", + failureCount: 5, + lastFailureAt: null, + openedAt: "2026-03-09T09:00:00.000Z", + lastSuccessAt: null, + }); + + const state = await recordSuccess("half-open-success"); + expect(state.state).toBe("closed"); + expect(state.failureCount).toBe(0); + }); +}); + +// -- shouldAttemptHttp -- + +describe("shouldAttemptHttp", () => { + const config: CircuitBreakerConfig = { + failureThreshold: 3, + cooldownMs: 60_000, + }; + + test("returns true for closed circuit", async () => { + const result = await shouldAttemptHttp("closed-svc", config); + expect(result).toBe(true); + }); + + test("returns false for open circuit", async () => { + await saveState("open-svc", { + state: "open", + failureCount: 5, + lastFailureAt: null, + openedAt: new Date().toISOString(), // just opened + lastSuccessAt: null, + }); + + const result = await shouldAttemptHttp("open-svc", config); + expect(result).toBe(false); + }); + + test("returns true for half-open circuit (probe)", async () => { + const pastTime = new Date(Date.now() - 120_000).toISOString(); + await saveState("halfopen-svc", { + state: "open", + failureCount: 5, + lastFailureAt: null, + openedAt: pastTime, + lastSuccessAt: null, + }); + + const result = await shouldAttemptHttp("halfopen-svc", config); + expect(result).toBe(true); + }); +}); + +// -- Full state machine lifecycle -- + +describe("circuit breaker lifecycle", () => { + const config: CircuitBreakerConfig = { + failureThreshold: 2, + cooldownMs: 50, // short cooldown for testing + }; + + 
test("closed -> open -> half-open -> closed", async () => { + // Start closed + let attempt = await shouldAttemptHttp("lifecycle", config); + expect(attempt).toBe(true); + + // Two failures -> opens + await recordFailure("lifecycle", config); + const openState = await recordFailure("lifecycle", config); + expect(openState.state).toBe("open"); + + // Immediately after opening: should not attempt + attempt = await shouldAttemptHttp("lifecycle", config); + expect(attempt).toBe(false); + + // Wait for cooldown + await new Promise((r) => setTimeout(r, 60)); + + // After cooldown: half-open, should attempt (probe) + attempt = await shouldAttemptHttp("lifecycle", config); + expect(attempt).toBe(true); + + // Success on probe -> closes circuit + const closedState = await recordSuccess("lifecycle"); + expect(closedState.state).toBe("closed"); + expect(closedState.failureCount).toBe(0); + }); + + test("closed -> open -> half-open -> open (probe fails)", async () => { + const svc = "lifecycle-probe-fail"; + + // Two failures -> opens + await recordFailure(svc, config); + await recordFailure(svc, config); + + // Wait for cooldown + await new Promise((r) => setTimeout(r, 60)); + + // Half-open probe attempt + const attempt = await shouldAttemptHttp(svc, config); + expect(attempt).toBe(true); + + // Probe fails -> reopens + const state = await recordFailure(svc, config); + expect(state.state).toBe("open"); + }); +}); + +// -- Disk persistence across "invocations" -- + +describe("cross-invocation persistence", () => { + test("state persists and is readable after save", async () => { + const config: CircuitBreakerConfig = { + failureThreshold: 2, + cooldownMs: 60_000, + }; + + // Simulate first CLI invocation: 2 failures -> open + await recordFailure("persist-svc", config); + await recordFailure("persist-svc", config); + + // Simulate second CLI invocation: read state directly + const state = await loadState("persist-svc"); + expect(state.state).toBe("open"); + expect(state.failureCount).toBe(2); + + // shouldAttemptHttp should return false (circuit open, no cooldown elapsed) + const attempt = await shouldAttemptHttp("persist-svc", config); + expect(attempt).toBe(false); + }); + + test("atomic write prevents corruption", async () => { + // Write state, verify the file is valid JSON + await saveState("atomic-test", { + state: "closed", + failureCount: 1, + lastFailureAt: new Date().toISOString(), + openedAt: null, + lastSuccessAt: null, + }); + + const filePath = getStateFilePath("atomic-test"); + const file = Bun.file(filePath); + const content = await file.json(); + expect(content.state).toBe("closed"); + expect(content.failureCount).toBe(1); + }); + + test("no temp files left after successful write", async () => { + await saveState("no-temp", { + state: "closed", + failureCount: 0, + lastFailureAt: null, + openedAt: null, + lastSuccessAt: null, + }); + + const dir = getCircuitBreakerDir(); + const files = await readdir(dir); + const tempFiles = files.filter((f) => f.includes(".tmp.")); + expect(tempFiles).toHaveLength(0); + }); +}); diff --git a/tests/resilience/pool-fallback.test.ts b/tests/resilience/pool-fallback.test.ts new file mode 100644 index 0000000..af8e0e3 --- /dev/null +++ b/tests/resilience/pool-fallback.test.ts @@ -0,0 +1,283 @@ +import { describe, test, expect, beforeEach, afterEach, mock } from "bun:test"; +import { join } from "node:path"; +import { mkdtemp, rm } from "node:fs/promises"; +import { tmpdir } from "node:os"; +import type { McpConnection } from 
"../../src/connection/types.ts"; +import type { ServicesConfig } from "../../src/config/index.ts"; +import { saveState } from "../../src/resilience/index.ts"; +import type { CircuitBreakerState } from "../../src/resilience/index.ts"; + +// -- Mocks -- + +const mockHttpConnection: McpConnection = { + client: { + callTool: mock(() => Promise.resolve({})), + listTools: mock(() => Promise.resolve({ tools: [] })), + } as never, + close: mock(() => Promise.resolve()), +}; + +const mockStdioConnection: McpConnection = { + client: { + callTool: mock(() => Promise.resolve({})), + listTools: mock(() => Promise.resolve({ tools: [] })), + } as never, + close: mock(() => Promise.resolve()), +}; + +const mockConnectToHttpService = mock(async () => mockHttpConnection); +const mockConnectToService = mock(async () => mockStdioConnection); + +mock.module("../../src/connection/index.ts", () => ({ + connectToHttpService: mockConnectToHttpService, + connectToService: mockConnectToService, +})); + +// Import pool AFTER mocking +const { ConnectionPool } = await import("../../src/daemon/pool.ts"); + +// -- Test setup -- + +let testDir: string; +let origCacheDir: string | undefined; + +const httpWithFallbackConfig: ServicesConfig = { + services: { + "gateway-svc": { + backend: "http" as const, + url: "http://mcp-gateway:3000/n8n", + headers: {}, + fallback: { + command: "npx", + args: ["-y", "n8n-mcp"], + env: {}, + }, + }, + }, +}; + +const httpNoFallbackConfig: ServicesConfig = { + services: { + "no-fallback": { + backend: "http" as const, + url: "http://mcp-gateway:3000/vault", + headers: {}, + }, + }, +}; + +beforeEach(async () => { + testDir = await mkdtemp(join(tmpdir(), "mcp2cli-pool-fb-test-")); + origCacheDir = process.env.MCP2CLI_CACHE_DIR; + process.env.MCP2CLI_CACHE_DIR = join(testDir, "schemas"); + + mockConnectToHttpService.mockClear(); + mockConnectToService.mockClear(); + mockConnectToHttpService.mockImplementation(async () => mockHttpConnection); + mockConnectToService.mockImplementation(async () => mockStdioConnection); +}); + +afterEach(async () => { + if (origCacheDir !== undefined) { + process.env.MCP2CLI_CACHE_DIR = origCacheDir; + } else { + delete process.env.MCP2CLI_CACHE_DIR; + } + await rm(testDir, { recursive: true, force: true }); +}); + +// -- Tests -- + +describe("pool HTTP with fallback", () => { + test("connects via HTTP when gateway is reachable", async () => { + const pool = new ConnectionPool(); + const conn = await pool.getConnection("gateway-svc", httpWithFallbackConfig); + + expect(conn).toBeDefined(); + expect(mockConnectToHttpService).toHaveBeenCalledTimes(1); + expect(mockConnectToService).not.toHaveBeenCalled(); + + await pool.closeAll(); + }); + + test("falls back to stdio when HTTP fails", async () => { + mockConnectToHttpService.mockImplementation(async () => { + throw new Error("Connection refused"); + }); + + const pool = new ConnectionPool(); + const conn = await pool.getConnection("gateway-svc", httpWithFallbackConfig); + + expect(conn).toBeDefined(); + expect(mockConnectToHttpService).toHaveBeenCalledTimes(1); + expect(mockConnectToService).toHaveBeenCalledTimes(1); + + await pool.closeAll(); + }); + + test("skips HTTP when circuit is open, uses fallback directly", async () => { + // Pre-set open circuit state + const openState: CircuitBreakerState = { + state: "open", + failureCount: 5, + lastFailureAt: new Date().toISOString(), + openedAt: new Date().toISOString(), + lastSuccessAt: null, + }; + await saveState("gateway-svc", openState); + + const pool = new 
ConnectionPool(); + const conn = await pool.getConnection("gateway-svc", httpWithFallbackConfig); + + expect(conn).toBeDefined(); + // HTTP should NOT have been attempted + expect(mockConnectToHttpService).not.toHaveBeenCalled(); + // Stdio fallback should have been used + expect(mockConnectToService).toHaveBeenCalledTimes(1); + + await pool.closeAll(); + }); + + test("throws when HTTP fails and no fallback configured", async () => { + mockConnectToHttpService.mockImplementation(async () => { + throw new Error("Connection refused"); + }); + + const pool = new ConnectionPool(); + await expect( + pool.getConnection("no-fallback", httpNoFallbackConfig), + ).rejects.toThrow("Connection refused"); + + expect(mockConnectToHttpService).toHaveBeenCalledTimes(1); + expect(mockConnectToService).not.toHaveBeenCalled(); + }); + + test("throws when circuit open and no fallback configured", async () => { + const openState: CircuitBreakerState = { + state: "open", + failureCount: 5, + lastFailureAt: new Date().toISOString(), + openedAt: new Date().toISOString(), + lastSuccessAt: null, + }; + await saveState("no-fallback", openState); + + const pool = new ConnectionPool(); + await expect( + pool.getConnection("no-fallback", httpNoFallbackConfig), + ).rejects.toThrow("Circuit breaker open"); + + expect(mockConnectToHttpService).not.toHaveBeenCalled(); + }); + + test("fallback receives correct stdio config from fallback field", async () => { + mockConnectToHttpService.mockImplementation(async () => { + throw new Error("Gateway down"); + }); + + const pool = new ConnectionPool(); + await pool.getConnection("gateway-svc", httpWithFallbackConfig); + + expect(mockConnectToService).toHaveBeenCalledTimes(1); + const calls = mockConnectToService.mock.calls as unknown as Array<[{ backend: string; command: string; args: string[]; env: Record<string, string> }]>; + const callArg = calls[0]![0]; + expect(callArg.backend).toBe("stdio"); + expect(callArg.command).toBe("npx"); + expect(callArg.args).toEqual(["-y", "n8n-mcp"]); + + await pool.closeAll(); + }); + + test("records success when HTTP connects", async () => { + const pool = new ConnectionPool(); + await pool.getConnection("gateway-svc", httpWithFallbackConfig); + + // Verify circuit breaker recorded success + const { loadState } = await import("../../src/resilience/index.ts"); + const state = await loadState("gateway-svc"); + expect(state.state).toBe("closed"); + expect(state.failureCount).toBe(0); + expect(state.lastSuccessAt).not.toBeNull(); + + await pool.closeAll(); + }); + + test("records failure when HTTP fails", async () => { + mockConnectToHttpService.mockImplementation(async () => { + throw new Error("Timeout"); + }); + + const pool = new ConnectionPool(); + await pool.getConnection("gateway-svc", httpWithFallbackConfig); + + const { loadState } = await import("../../src/resilience/index.ts"); + const state = await loadState("gateway-svc"); + expect(state.failureCount).toBe(1); + + await pool.closeAll(); + }); +}); + +describe("pool HTTP fallback - schema config validation", () => { + test("services.json with fallback parses correctly", async () => { + const { ServicesConfigSchema } = await import("../../src/config/index.ts"); + + const result = ServicesConfigSchema.safeParse({ + services: { + n8n: { + backend: "http", + url: "http://mcp-gateway:3000/n8n", + fallback: { + command: "npx", + args: ["-y", "n8n-mcp"], + }, + }, + }, + }); + + expect(result.success).toBe(true); + if (result.success) { + const svc = result.data.services.n8n!; + 
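// Narrowing on the `backend` discriminant is what lets TypeScript expose the http-only `fallback` field in the assertions below. + 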
expect(svc.backend).toBe("http"); + if (svc.backend === "http") { + expect(svc.fallback).toBeDefined(); + expect(svc.fallback!.command).toBe("npx"); + expect(svc.fallback!.args).toEqual(["-y", "n8n-mcp"]); + expect(svc.fallback!.env).toEqual({}); + } + } + }); + + test("HTTP service without fallback still valid", async () => { + const { ServicesConfigSchema } = await import("../../src/config/index.ts"); + + const result = ServicesConfigSchema.safeParse({ + services: { + vault: { + backend: "http", + url: "http://mcp-gateway:3000/vault", + }, + }, + }); + + expect(result.success).toBe(true); + }); + + test("fallback with invalid command fails", async () => { + const { ServicesConfigSchema } = await import("../../src/config/index.ts"); + + const result = ServicesConfigSchema.safeParse({ + services: { + bad: { + backend: "http", + url: "http://mcp-gateway:3000/bad", + fallback: { + command: "", // empty command should fail + }, + }, + }, + }); + + expect(result.success).toBe(false); + }); +}); From ea09a5cd082c6e56c81218197f034fc6c25e432f Mon Sep 17 00:00:00 2001 From: Rodaddy Date: Mon, 9 Mar 2026 19:01:51 -0400 Subject: [PATCH 2/6] docs: add example configs, generated skills, and fix test cache isolation Examples: - examples/services-basic.json -- single service config - examples/services-multi.json -- multi-service with access control - examples/services-http-fallback.json -- HTTP gateway with stdio fallback - examples/skills/qmd/ -- real generated skill for QMD (6 tools, 247 tokens) - examples/skills/n8n/ -- real generated skill for n8n (20 tools, 467 tokens) - examples/skills/vaultwarden-secrets/ -- real generated skill (11 tools, 328 tokens) Test isolation: - Fix 4 daemon test files leaking cache artifacts into ~/.cache/mcp2cli/ - All tests now use isolated MCP2CLI_CACHE_DIR via mkdtemp README: - Add description fields to all example config blocks Co-Authored-By: Claude Opus 4.6 --- README.md | 4 + examples/services-basic.json | 10 + examples/services-http-fallback.json | 13 ++ examples/services-multi.json | 18 ++ examples/skills/n8n/SKILL.md | 60 ++++++ examples/skills/n8n/references/general-ops.md | 99 +++++++++ examples/skills/n8n/references/node-ops.md | 68 ++++++ .../skills/n8n/references/template-ops.md | 70 +++++++ .../skills/n8n/references/workflow-ops.md | 196 ++++++++++++++++++ examples/skills/qmd/SKILL.md | 46 ++++ examples/skills/qmd/references/general-ops.md | 72 +++++++ examples/skills/qmd/references/get-ops.md | 43 ++++ examples/skills/vaultwarden-secrets/SKILL.md | 51 +++++ .../references/general-ops.md | 76 +++++++ .../references/secret-ops.md | 117 +++++++++++ tests/daemon/observability.test.ts | 8 + tests/daemon/pool-hardening.test.ts | 22 ++ tests/daemon/pool.test.ts | 24 ++- tests/daemon/timeout.test.ts | 1 + 19 files changed, 997 insertions(+), 1 deletion(-) create mode 100644 examples/services-basic.json create mode 100644 examples/services-http-fallback.json create mode 100644 examples/services-multi.json create mode 100644 examples/skills/n8n/SKILL.md create mode 100644 examples/skills/n8n/references/general-ops.md create mode 100644 examples/skills/n8n/references/node-ops.md create mode 100644 examples/skills/n8n/references/template-ops.md create mode 100644 examples/skills/n8n/references/workflow-ops.md create mode 100644 examples/skills/qmd/SKILL.md create mode 100644 examples/skills/qmd/references/general-ops.md create mode 100644 examples/skills/qmd/references/get-ops.md create mode 100644 examples/skills/vaultwarden-secrets/SKILL.md create mode 100644 
examples/skills/vaultwarden-secrets/references/general-ops.md create mode 100644 examples/skills/vaultwarden-secrets/references/secret-ops.md diff --git a/README.md b/README.md index d2b8ac9..89522e3 100644 --- a/README.md +++ b/README.md @@ -51,6 +51,7 @@ mcp2cli discovers MCP servers from `~/.config/mcp2cli/services.json`: { "services": { "n8n": { + "description": "n8n workflow automation", "backend": "stdio", "command": "npx", "args": ["-y", "@anthropic-ai/n8n-mcp"], @@ -60,6 +61,7 @@ mcp2cli discovers MCP servers from `~/.config/mcp2cli/services.json`: } }, "homekit": { + "description": "HomeKit smart home control", "backend": "stdio", "command": "node", "args": ["/path/to/homekit-mcp/dist/index.js"], @@ -279,6 +281,7 @@ Restrict which tools are exposed per service using `allowTools` and `blockTools` { "services": { "n8n": { + "description": "n8n workflow automation", "backend": "stdio", "command": "npx", "args": ["-y", "@anthropic/n8n-mcp"], @@ -313,6 +316,7 @@ HTTP/SSE services can define a `fallback` stdio config. If the remote gateway is { "services": { "n8n": { + "description": "n8n via HTTP gateway with stdio fallback", "backend": "http", "url": "http://mcp-gateway:3000/n8n", "fallback": { diff --git a/examples/services-basic.json b/examples/services-basic.json new file mode 100644 index 0000000..bb1ef6a --- /dev/null +++ b/examples/services-basic.json @@ -0,0 +1,10 @@ +{ + "services": { + "n8n": { + "description": "n8n workflow automation", + "backend": "stdio", + "command": "npx", + "args": ["-y", "@anthropic/n8n-mcp"] + } + } +} diff --git a/examples/services-http-fallback.json b/examples/services-http-fallback.json new file mode 100644 index 0000000..f07edd2 --- /dev/null +++ b/examples/services-http-fallback.json @@ -0,0 +1,13 @@ +{ + "services": { + "n8n": { + "description": "n8n via HTTP gateway with stdio fallback", + "backend": "http", + "url": "http://mcp-gateway.local:3000/n8n", + "fallback": { + "command": "npx", + "args": ["-y", "@anthropic/n8n-mcp"] + } + } + } +} diff --git a/examples/services-multi.json b/examples/services-multi.json new file mode 100644 index 0000000..a0860af --- /dev/null +++ b/examples/services-multi.json @@ -0,0 +1,18 @@ +{ + "services": { + "n8n": { + "description": "n8n workflow automation", + "backend": "stdio", + "command": "npx", + "args": ["-y", "@anthropic/n8n-mcp"], + "allowTools": ["n8n_list_*", "n8n_get_*", "n8n_search_*"], + "blockTools": ["n8n_delete_*"] + }, + "qmd": { + "description": "QMD document search and retrieval", + "backend": "stdio", + "command": "qmd", + "args": ["mcp"] + } + } +} diff --git a/examples/skills/n8n/SKILL.md b/examples/skills/n8n/SKILL.md new file mode 100644 index 0000000..6577e89 --- /dev/null +++ b/examples/skills/n8n/SKILL.md @@ -0,0 +1,60 @@ +--- +name: n8n +description: MCP tools for n8n +triggers: + - n8n + - workflow + - node + - detail + - levels + - multiple + - modes + - template +--- + +# n8n + +MCP tools for n8n + + + +## Quick Reference + +| Tool | Description | +|------|-------------| +| get_node | Get node info with progressive detail levels and multiple modes. | +| get_template | Get template by ID. | +| n8n_autofix_workflow | Automatically fix common workflow validation errors. | +| n8n_create_workflow | Create workflow. | +| n8n_delete_workflow | Permanently delete a workflow. | +| n8n_deploy_template | Deploy a workflow template from n8n.io. | +| n8n_executions | Manage workflow executions: get details, list, or delete. 
| +| n8n_get_workflow | Get workflow by ID with different detail levels. | +| n8n_health_check | Check n8n instance health and API connectivity. | +| n8n_list_workflows | List workflows (minimal metadata only). | +| n8n_test_workflow | Test/trigger workflow execution. | +| n8n_update_full_workflow | Full workflow update. | +| n8n_update_partial_workflow | Update workflow incrementally with diff operations. | +| n8n_validate_workflow | Validate workflow by ID. | +| n8n_workflow_versions | Manage workflow version history, rollback, and cleanup. | +| search_nodes | Search n8n nodes by keyword with optional real-world examples. | +| search_templates | Search templates with multiple modes. | +| tools_documentation | Get documentation for n8n MCP tools. | +| validate_node | Validate n8n node configuration. | +| validate_workflow | Full workflow validation: structure, connections, expressions, AI tools. | + +## Usage + +```bash +mcp2cli n8n <tool> --params '{...}' +``` + +See `references/` for detailed parameter docs per tool. + +<!-- MANUAL:START --> + +## Notes + +<!-- MANUAL:END --> + diff --git a/examples/skills/n8n/references/general-ops.md b/examples/skills/n8n/references/general-ops.md new file mode 100644 index 0000000..ebc8370 --- /dev/null +++ b/examples/skills/n8n/references/general-ops.md @@ -0,0 +1,99 @@ +# n8n -- General + + + +## n8n_executions + +Manage workflow executions: get details, list, or delete. Use action='get' with id for execution details, action='list' for listing executions, action='delete' to remove execution record. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| action | string | Yes | Operation: get=get execution details, list=list executions, delete=delete execution | +| id | string | No | Execution ID (required for action=get or action=delete) | +| mode | string | No | For action=get: preview=structure only, summary=2 items (default), filtered=custom, full=all data, error=optimized error debugging | +| nodeNames | array | No | For action=get with mode=filtered: filter to specific nodes by name | +| itemsLimit | number | No | For action=get with mode=filtered: items per node (0=structure, 2=default, -1=unlimited) | +| includeInputData | boolean | No | For action=get: include input data in addition to output (default: false) | +| errorItemsLimit | number | No | For action=get with mode=error: sample items from upstream node (default: 2, max: 100) | +| includeStackTrace | boolean | No | For action=get with mode=error: include full stack trace (default: false, shows truncated) | +| includeExecutionPath | boolean | No | For action=get with mode=error: include execution path leading to error (default: true) | +| fetchWorkflow | boolean | No | For action=get with mode=error: fetch workflow for accurate upstream detection (default: true) | +| limit | number | No | For action=list: number of executions to return (1-100, default: 100) | +| cursor | string | No | For action=list: pagination cursor from previous response | +| workflowId | string | No | For action=list: filter by workflow ID | +| projectId | string | No | For action=list: filter by project ID (enterprise feature) | +| status | string | No | For action=list: filter by execution status | +| includeData | boolean | No | For action=list: include execution data (default: false) | + +### Example + +```bash +mcp2cli n8n n8n_executions --params '{"action":"value"}' +``` + +## n8n_health_check + +Check n8n instance health and API connectivity. 
Use mode='diagnostic' for detailed troubleshooting with env vars and tool status. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| mode | string | No | Mode: "status" (default) for quick health check, "diagnostic" for detailed debug info including env vars and tool status | +| verbose | boolean | No | Include extra details in diagnostic mode (default: false) | + +### Example + +```bash +mcp2cli n8n n8n_health_check +``` + +## n8n_workflow_versions + +Manage workflow version history, rollback, and cleanup. Six modes: +- list: Show version history for a workflow +- get: Get details of specific version +- rollback: Restore workflow to previous version (creates backup first) +- delete: Delete specific version or all versions for a workflow +- prune: Manually trigger pruning to keep N most recent versions +- truncate: Delete ALL versions for ALL workflows (requires confirmation) + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| mode | string | Yes | Operation mode | +| workflowId | string | No | Workflow ID (required for list, rollback, delete, prune) | +| versionId | number | No | Version ID (required for get mode and single version delete, optional for rollback) | +| limit | number | No | Max versions to return in list mode | +| validateBefore | boolean | No | Validate workflow structure before rollback | +| deleteAll | boolean | No | Delete all versions for workflow (delete mode only) | +| maxVersions | number | No | Keep N most recent versions (prune mode only) | +| confirmTruncate | boolean | No | REQUIRED: Must be true to truncate all versions (truncate mode only) | + +### Example + +```bash +mcp2cli n8n n8n_workflow_versions --params '{"mode":"value"}' +``` + +## tools_documentation + +Get documentation for n8n MCP tools. Call without parameters for quick start guide. Use topic parameter to get documentation for specific tools. Use depth='full' for comprehensive documentation. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| topic | string | No | Tool name (e.g., "search_nodes") or "overview" for general guide. Leave empty for quick reference. | +| depth | string | No | Level of detail. "essentials" (default) for quick reference, "full" for comprehensive docs. | + +### Example + +```bash +mcp2cli n8n tools_documentation +``` + + diff --git a/examples/skills/n8n/references/node-ops.md b/examples/skills/n8n/references/node-ops.md new file mode 100644 index 0000000..ed64e7f --- /dev/null +++ b/examples/skills/n8n/references/node-ops.md @@ -0,0 +1,68 @@ +# n8n -- Node Operations + + + +## get_node + +Get node info with progressive detail levels and multiple modes. Detail: minimal (~200 tokens), standard (~1-2K, default), full (~3-8K). Modes: info (default), docs (markdown documentation), search_properties (find properties), versions/compare/breaking/migrations (version info). Use format='docs' for readable documentation, mode='search_properties' with propertyQuery for finding specific fields. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| nodeType | string | Yes | Full node type: "nodes-base.httpRequest" or "nodes-langchain.agent" | +| detail | string | No | Information detail level. standard=essential properties (recommended), full=everything | +| mode | string | No | Operation mode. 
info=node schema, docs=readable markdown documentation, search_properties=find specific properties, versions/compare/breaking/migrations=version info | +| includeTypeInfo | boolean | No | Include type structure metadata (type category, JS type, validation rules). Only applies to mode=info. Adds ~80-120 tokens per property. | +| includeExamples | boolean | No | Include real-world configuration examples from templates. Only applies to mode=info with detail=standard. Adds ~200-400 tokens per example. | +| fromVersion | string | No | Source version for compare/breaking/migrations modes (e.g., "1.0") | +| toVersion | string | No | Target version for compare mode (e.g., "2.0"). Defaults to latest if omitted. | +| propertyQuery | string | No | For mode=search_properties: search term to find properties (e.g., "auth", "header", "body") | +| maxPropertyResults | number | No | For mode=search_properties: max results (default 20) | + +### Example + +```bash +mcp2cli n8n get_node --params '{"nodeType":"value"}' +``` + +## search_nodes + +Search n8n nodes by keyword with optional real-world examples. Pass query as string. Example: query="webhook" or query="database". Returns max 20 results. Use includeExamples=true to get top 2 template configs per node. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| query | string | Yes | Search terms. Use quotes for exact phrase. | +| limit | number | No | Max results (default 20) | +| mode | string | No | OR=any word, AND=all words, FUZZY=typo-tolerant | +| includeExamples | boolean | No | Include top 2 real-world configuration examples from popular templates (default: false) | +| source | string | No | Filter by node source: all=everything (default), core=n8n base nodes, community=community nodes, verified=verified community nodes only | + +### Example + +```bash +mcp2cli n8n search_nodes --params '{"query":"value"}' +``` + +## validate_node + +Validate n8n node configuration. Use mode='full' for comprehensive validation with errors/warnings/suggestions, mode='minimal' for quick required fields check. Example: nodeType="nodes-base.slack", config={resource:"channel",operation:"create"} + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| nodeType | string | Yes | Node type as string. Example: "nodes-base.slack" | +| config | object | Yes | Configuration as object. For simple nodes use {}. For complex nodes include fields like {resource:"channel",operation:"create"} | +| mode | string | No | Validation mode. full=comprehensive validation with errors/warnings/suggestions, minimal=quick required fields check only. Default is "full" | +| profile | string | No | Profile for mode=full: "minimal", "runtime", "ai-friendly", or "strict". Default is "ai-friendly" | + +### Example + +```bash +mcp2cli n8n validate_node --params '{"nodeType":"value","config":{}}' +``` + + diff --git a/examples/skills/n8n/references/template-ops.md b/examples/skills/n8n/references/template-ops.md new file mode 100644 index 0000000..a72c010 --- /dev/null +++ b/examples/skills/n8n/references/template-ops.md @@ -0,0 +1,70 @@ +# n8n -- Template Operations + + + +## get_template + +Get template by ID. Use mode to control response size: nodes_only (minimal), structure (nodes+connections), full (complete workflow). 
+ +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| templateId | number | Yes | The template ID to retrieve | +| mode | string | No | Response detail level. nodes_only: just node list, structure: nodes+connections, full: complete workflow JSON. | + +### Example + +```bash +mcp2cli n8n get_template --params '{"templateId":1}' +``` + +## n8n_deploy_template + +Deploy a workflow template from n8n.io directly to your n8n instance. Deploys first, then auto-fixes common issues (expression format, typeVersions). Returns workflow ID, required credentials, and fixes applied. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| templateId | number | Yes | Template ID from n8n.io (required) | +| name | string | No | Custom workflow name (default: template name) | +| autoUpgradeVersions | boolean | No | Automatically upgrade node typeVersions to latest supported (default: true) | +| autoFix | boolean | No | Auto-apply fixes after deployment for expression format issues, missing = prefix, etc. (default: true) | +| stripCredentials | boolean | No | Remove credential references from nodes - user configures in n8n UI (default: true) | + +### Example + +```bash +mcp2cli n8n n8n_deploy_template --params '{"templateId":1}' +``` + +## search_templates + +Search templates with multiple modes. Use searchMode='keyword' for text search, 'by_nodes' to find templates using specific nodes, 'by_task' for curated task-based templates, 'by_metadata' for filtering by complexity/setup time/services. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| searchMode | string | No | Search mode. keyword=text search (default), by_nodes=find by node types, by_task=curated task templates, by_metadata=filter by complexity/services | +| query | string | No | For searchMode=keyword: search keyword (e.g., "chatbot") | +| fields | array | No | For searchMode=keyword: fields to include in response. Default: all fields. | +| nodeTypes | array | No | For searchMode=by_nodes: array of node types (e.g., ["n8n-nodes-base.httpRequest", "n8n-nodes-base.slack"]) | +| task | string | No | For searchMode=by_task: the type of task | +| category | string | No | For searchMode=by_metadata: filter by category (e.g., "automation", "integration") | +| complexity | string | No | For searchMode=by_metadata: filter by complexity level | +| maxSetupMinutes | number | No | For searchMode=by_metadata: maximum setup time in minutes | +| minSetupMinutes | number | No | For searchMode=by_metadata: minimum setup time in minutes | +| requiredService | string | No | For searchMode=by_metadata: filter by required service (e.g., "openai", "slack") | +| targetAudience | string | No | For searchMode=by_metadata: filter by target audience (e.g., "developers", "marketers") | +| limit | number | No | Maximum number of results. Default 20. | +| offset | number | No | Pagination offset. Default 0. | + +### Example + +```bash +mcp2cli n8n search_templates +``` + + diff --git a/examples/skills/n8n/references/workflow-ops.md b/examples/skills/n8n/references/workflow-ops.md new file mode 100644 index 0000000..e5e849a --- /dev/null +++ b/examples/skills/n8n/references/workflow-ops.md @@ -0,0 +1,196 @@ +# n8n -- Workflow Operations + + + +## n8n_autofix_workflow + +Automatically fix common workflow validation errors. Preview fixes or apply them. 
Fixes expression format, typeVersion, error output config, webhook paths, connection structure issues (numeric keys, invalid types, ID-to-name, duplicates, out-of-bounds indices). + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| id | string | Yes | Workflow ID to fix | +| applyFixes | boolean | No | Apply fixes to workflow (default: false - preview mode) | +| fixTypes | array | No | Types of fixes to apply (default: all) | +| confidenceThreshold | string | No | Minimum confidence level for fixes (default: medium) | +| maxFixes | number | No | Maximum number of fixes to apply (default: 50) | + +### Example + +```bash +mcp2cli n8n n8n_autofix_workflow --params '{"id":"value"}' +``` + +## n8n_create_workflow + +Create workflow. Requires: name, nodes[], connections{}. Created inactive. Returns workflow with ID. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| name | string | Yes | Workflow name (required) | +| nodes | array | Yes | Array of workflow nodes. Each node must have: id, name, type, typeVersion, position, and parameters | +| connections | object | Yes | Workflow connections object. Keys are source node names (the name field, not id), values define output connections | +| settings | object | No | Optional workflow settings (execution order, timezone, error handling) | + +### Example + +```bash +mcp2cli n8n n8n_create_workflow --params '{"name":"value","nodes":[],"connections":{}}' +``` + +## n8n_delete_workflow + +Permanently delete a workflow. This action cannot be undone. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| id | string | Yes | Workflow ID to delete | + +### Example + +```bash +mcp2cli n8n n8n_delete_workflow --params '{"id":"value"}' +``` + +## n8n_get_workflow + +Get workflow by ID with different detail levels. Use mode='full' for complete workflow, 'details' for metadata+stats, 'structure' for nodes/connections only, 'minimal' for id/name/active/tags. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| id | string | Yes | Workflow ID | +| mode | string | No | Detail level: full=complete workflow, details=full+execution stats, structure=nodes/connections topology, minimal=metadata only | + +### Example + +```bash +mcp2cli n8n n8n_get_workflow --params '{"id":"value"}' +``` + +## n8n_list_workflows + +List workflows (minimal metadata only). Returns id/name/active/dates/tags. Check hasMore/nextCursor for pagination. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| limit | number | No | Number of workflows to return (1-100, default: 100) | +| cursor | string | No | Pagination cursor from previous response | +| active | boolean | No | Filter by active status | +| tags | array | No | Filter by tags (exact match) | +| projectId | string | No | Filter by project ID (enterprise feature) | +| excludePinnedData | boolean | No | Exclude pinned data from response (default: true) | + +### Example + +```bash +mcp2cli n8n n8n_list_workflows +``` + +## n8n_test_workflow + +Test/trigger workflow execution. Auto-detects trigger type (webhook/form/chat). Supports: webhook (HTTP), form (fields), chat (message). Note: Only workflows with these trigger types can be executed externally. 
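+ +For example, a chat trigger could be exercised like this (hypothetical workflow ID; `message` is required for chat triggers -- see the parameter table below): + +```bash +mcp2cli n8n n8n_test_workflow --params '{"workflowId":"abc123","triggerType":"chat","message":"hello"}' +```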
+ +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| workflowId | string | Yes | Workflow ID to execute (required) | +| triggerType | string | No | Trigger type. Auto-detected if not specified. Workflow must have a matching trigger node. | +| httpMethod | string | No | For webhook: HTTP method (default: from workflow config or POST) | +| webhookPath | string | No | For webhook: override the webhook path | +| message | string | No | For chat: message to send (required for chat triggers) | +| sessionId | string | No | For chat: session ID for conversation continuity | +| data | object | No | Input data/payload for webhook, form fields, or execution data | +| headers | object | No | Custom HTTP headers | +| timeout | number | No | Timeout in ms (default: 120000) | +| waitForResponse | boolean | No | Wait for workflow completion (default: true) | + +### Example + +```bash +mcp2cli n8n n8n_test_workflow --params '{"workflowId":"value"}' +``` + +## n8n_update_full_workflow + +Full workflow update. Requires complete nodes[] and connections{}. For incremental use n8n_update_partial_workflow. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| id | string | Yes | Workflow ID to update | +| name | string | No | New workflow name | +| nodes | array | No | Complete array of workflow nodes (required if modifying workflow structure) | +| connections | object | No | Complete connections object (required if modifying workflow structure) | +| settings | object | No | Workflow settings to update | + +### Example + +```bash +mcp2cli n8n n8n_update_full_workflow --params '{"id":"value"}' +``` + +## n8n_update_partial_workflow + +Update workflow incrementally with diff operations. Types: addNode, removeNode, updateNode, moveNode, enable/disableNode, addConnection, removeConnection, updateSettings, updateName, add/removeTag. See tools_documentation("n8n_update_partial_workflow", "full") for details. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| id | string | Yes | Workflow ID to update | +| operations | array | Yes | Array of diff operations to apply. Each operation must have a "type" field and relevant properties for that operation type. | +| validateOnly | boolean | No | If true, only validate operations without applying them | +| continueOnError | boolean | No | If true, apply valid operations even if some fail (best-effort mode). Returns applied and failed operation indices. Default: false (atomic) | + +### Example + +```bash +mcp2cli n8n n8n_update_partial_workflow --params '{"id":"value","operations":[]}' +``` + +## n8n_validate_workflow + +Validate workflow by ID. Checks nodes, connections, expressions. Returns errors/warnings/suggestions. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| id | string | Yes | Workflow ID to validate | +| options | object | No | Validation options | + +### Example + +```bash +mcp2cli n8n n8n_validate_workflow --params '{"id":"value"}' +``` + +## validate_workflow + +Full workflow validation: structure, connections, expressions, AI tools. Returns errors/warnings/fixes. Essential before deploy. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| workflow | object | Yes | The complete workflow JSON to validate. Must include nodes array and connections object. 
| +| options | object | No | Optional validation settings | + +### Example + +```bash +mcp2cli n8n validate_workflow --params '{"workflow":{}}' +``` + + diff --git a/examples/skills/qmd/SKILL.md b/examples/skills/qmd/SKILL.md new file mode 100644 index 0000000..65b3cf1 --- /dev/null +++ b/examples/skills/qmd/SKILL.md @@ -0,0 +1,46 @@ +--- +name: qmd +description: MCP tools for qmd +triggers: + - qmd + - search + - retrieve + - full + - document + - bm25 + - vector + - using +--- + +# qmd + +MCP tools for qmd + + + +## Quick Reference + +| Tool | Description | +|------|-------------| +| get | Retrieve the full content of a document by its file path or docid. | +| multi_get | Retrieve multiple documents by glob pattern or comma-separated list. | +| query | Highest quality search combining BM25 + vector + query expansion + LLM reranking... | +| search | Fast keyword-based full-text search using BM25. | +| status | Show the status of the QMD index: collections, document counts, and health infor... | +| vsearch | Semantic similarity search using vector embeddings. | + +## Usage + +```bash +mcp2cli qmd <tool> --params '{...}' +``` + +See `references/` for detailed parameter docs per tool. + +<!-- MANUAL:START --> + +## Notes + +<!-- MANUAL:END --> + diff --git a/examples/skills/qmd/references/general-ops.md b/examples/skills/qmd/references/general-ops.md new file mode 100644 index 0000000..b81004d --- /dev/null +++ b/examples/skills/qmd/references/general-ops.md @@ -0,0 +1,72 @@ +# qmd -- General + + + +## query + +Highest quality search combining BM25 + vector + query expansion + LLM reranking. Slower but most accurate. Use for important searches. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| query | string | Yes | Natural language query - describe what you're looking for | +| limit | number | No | Maximum number of results (default: 10) | +| minScore | number | No | Minimum relevance score 0-1 (default: 0) | +| collection | string | No | Filter to a specific collection by name | + +### Example + +```bash +mcp2cli qmd query --params '{"query":"value"}' +``` + +## search + +Fast keyword-based full-text search using BM25. Best for finding documents with specific words or phrases. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| query | string | Yes | Search query - keywords or phrases to find | +| limit | number | No | Maximum number of results (default: 10) | +| minScore | number | No | Minimum relevance score 0-1 (default: 0) | +| collection | string | No | Filter to a specific collection by name | + +### Example + +```bash +mcp2cli qmd search --params '{"query":"value"}' +``` + +## status + +Show the status of the QMD index: collections, document counts, and health information. + +### Example + +```bash +mcp2cli qmd status +``` + +## vsearch + +Semantic similarity search using vector embeddings. Finds conceptually related content even without exact keyword matches. Requires embeddings (run 'qmd embed' first). 
+ +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| query | string | Yes | Natural language query - describe what you're looking for | +| limit | number | No | Maximum number of results (default: 10) | +| minScore | number | No | Minimum relevance score 0-1 (default: 0.3) | +| collection | string | No | Filter to a specific collection by name | + +### Example + +```bash +mcp2cli qmd vsearch --params '{"query":"value"}' +``` + + diff --git a/examples/skills/qmd/references/get-ops.md b/examples/skills/qmd/references/get-ops.md new file mode 100644 index 0000000..cbf49e7 --- /dev/null +++ b/examples/skills/qmd/references/get-ops.md @@ -0,0 +1,43 @@ +# qmd -- Get Operations + + + +## get + +Retrieve the full content of a document by its file path or docid. Use paths or docids (#abc123) from search results. Suggests similar files if not found. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| file | string | Yes | File path or docid from search results (e.g., 'pages/meeting.md', '#abc123', or 'pages/meeting.md:100' to start at line 100) | +| fromLine | number | No | Start from this line number (1-indexed) | +| maxLines | number | No | Maximum number of lines to return | +| lineNumbers | boolean | No | Add line numbers to output (format: 'N: content') | + +### Example + +```bash +mcp2cli qmd get --params '{"file":"value"}' +``` + +## multi_get + +Retrieve multiple documents by glob pattern (e.g., 'journals/2025-05*.md') or comma-separated list. Skips files larger than maxBytes. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| pattern | string | Yes | Glob pattern or comma-separated list of file paths | +| maxLines | number | No | Maximum lines per file | +| maxBytes | number | No | Skip files larger than this (default: 10240 = 10KB) | +| lineNumbers | boolean | No | Add line numbers to output (format: 'N: content') | + +### Example + +```bash +mcp2cli qmd multi_get --params '{"pattern":"value"}' +``` + + diff --git a/examples/skills/vaultwarden-secrets/SKILL.md b/examples/skills/vaultwarden-secrets/SKILL.md new file mode 100644 index 0000000..bbbc687 --- /dev/null +++ b/examples/skills/vaultwarden-secrets/SKILL.md @@ -0,0 +1,51 @@ +--- +name: vaultwarden-secrets +description: MCP tools for vaultwarden-secrets +triggers: + - vaultwarden-secrets + - secret + - vault + - name + - item + - secrets + - snapshot + - create +--- + +# vaultwarden-secrets + +MCP tools for vaultwarden-secrets + + + +## Quick Reference + +| Tool | Description | +|------|-------------| +| create_secret | Create a new secret in the vault (Infrastructure folder). | +| delete_secret | Delete a secret from the vault. | +| get_credential | Smart single-call credential lookup. | +| get_secret | Get a secret value by name | +| get_secret_fields | Get all fields for a secret item | +| get_service | Get all vault items for a service (API credentials + per-host entries). | +| list_secrets | List available secrets with optional filter | +| refresh_snapshot | Force a snapshot refresh from the live vault. | +| search_secrets | Fuzzy search for secrets by name | +| snapshot_info | Get vault snapshot metadata (age, item count, staleness) | +| update_secret | Update an existing secret. | + +## Usage + +```bash +mcp2cli vaultwarden-secrets <tool> --params '{...}' +``` + +See `references/` for detailed parameter docs per tool. 
+ +<!-- MANUAL:START --> + +## Notes + +<!-- MANUAL:END --> + diff --git a/examples/skills/vaultwarden-secrets/references/general-ops.md b/examples/skills/vaultwarden-secrets/references/general-ops.md new file mode 100644 index 0000000..7a9b6aa --- /dev/null +++ b/examples/skills/vaultwarden-secrets/references/general-ops.md @@ -0,0 +1,76 @@ +# vaultwarden-secrets -- General + + + +## get_credential + +Smart single-call credential lookup. Tries exact name match first, falls back to fuzzy search. Returns value, all fields, and item metadata in one response. Use this instead of chaining search_secrets -> get_secret_fields -> get_secret. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| query | string | Yes | Secret name (exact) or search term (fuzzy). Examples: "n8n local", "grafana", "PostgreSQL n8n-ops" | +| field | string | No | Specific field to extract (e.g. "login.password", "login.username", "notes"). Omit to get all fields. | +| vault | string | No | Vault ID (default: "default") | + +### Example + +```bash +mcp2cli vaultwarden-secrets get_credential --params '{"query":"value"}' +``` + +## get_secret_fields + +Get all fields for a secret item + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| name | string | Yes | Secret item name | +| vault | string | No | Vault ID | + +### Example + +```bash +mcp2cli vaultwarden-secrets get_secret_fields --params '{"name":"value"}' +``` + +## get_service + +Get all vault items for a service (API credentials + per-host entries). Uses naming convention: SERVICE_API for shared credentials, service01/02/etc for hosts. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| service | string | Yes | Service name prefix (e.g. "proxmox", "redis", "github") | + +### Example + +```bash +mcp2cli vaultwarden-secrets get_service --params '{"service":"value"}' +``` + +## refresh_snapshot + +Force a snapshot refresh from the live vault. Use when snapshot_info shows stale data. + +### Example + +```bash +mcp2cli vaultwarden-secrets refresh_snapshot +``` + +## snapshot_info + +Get vault snapshot metadata (age, item count, staleness) + +### Example + +```bash +mcp2cli vaultwarden-secrets snapshot_info +``` + + diff --git a/examples/skills/vaultwarden-secrets/references/secret-ops.md b/examples/skills/vaultwarden-secrets/references/secret-ops.md new file mode 100644 index 0000000..64cbd0d --- /dev/null +++ b/examples/skills/vaultwarden-secrets/references/secret-ops.md @@ -0,0 +1,117 @@ +# vaultwarden-secrets -- Secret Operations + + + +## create_secret + +Create a new secret in the vault (Infrastructure folder). Supports login items (type 1) and secure notes (type 2) with custom fields. Triggers snapshot refresh after creation. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| name | string | Yes | Name for the new secret | +| type | number | No | Item type: 1=login (default), 2=secure note. Use type 2 with custom fields for API tokens. | +| username | string | No | Login username (type 1 only) | +| password | string | No | Login password (type 1 only) | +| uri | string | No | Login URI (type 1 only, e.g. https://example.com) | +| notes | string | No | Notes field | +| fields | array | No | Custom fields (e.g. 
API tokens on secure notes) | + +### Example + +```bash +mcp2cli vaultwarden-secrets create_secret --params '{"name":"value"}' +``` + +## delete_secret + +Delete a secret from the vault. Only secrets in allowed folders can be deleted. Triggers snapshot refresh. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| name | string | Yes | Name of the secret to delete | + +### Example + +```bash +mcp2cli vaultwarden-secrets delete_secret --params '{"name":"value"}' +``` + +## get_secret + +Get a secret value by name + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| name | string | Yes | Secret name or path (e.g. "github-pat", "github-pat.login.password") | +| vault | string | No | Vault ID (default: "default") | + +### Example + +```bash +mcp2cli vaultwarden-secrets get_secret --params '{"name":"value"}' +``` + +## list_secrets + +List available secrets with optional filter + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| filter | string | No | Filter string (case-insensitive) | +| vault | string | No | Vault ID | + +### Example + +```bash +mcp2cli vaultwarden-secrets list_secrets +``` + +## search_secrets + +Fuzzy search for secrets by name + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| query | string | Yes | Search query | +| limit | number | No | Max results | +| vault | string | No | Vault ID (default: "default") | + +### Example + +```bash +mcp2cli vaultwarden-secrets search_secrets --params '{"query":"value"}' +``` + +## update_secret + +Update an existing secret. Supports login fields and custom fields. Only secrets in allowed folders can be modified. Triggers snapshot refresh. + +### Parameters + +| Name | Type | Required | Description | +|------|------|----------|-------------| +| name | string | Yes | Name of the secret to update | +| username | string | No | New username (omit to keep current) | +| password | string | No | New password (omit to keep current) | +| uri | string | No | New URI (omit to keep current) | +| notes | string | No | New notes (omit to keep current) | +| fields | array | No | Custom fields to add/update | +| fieldStrategy | string | No | 'merge' (default): update existing fields by name, append new. 'replace': overwrite all fields. 
|
+
+### Example
+
+```bash
+mcp2cli vaultwarden-secrets update_secret --params '{"name":"value"}'
+```
+
diff --git a/tests/daemon/observability.test.ts b/tests/daemon/observability.test.ts
index e3bee50..ded9178 100644
--- a/tests/daemon/observability.test.ts
+++ b/tests/daemon/observability.test.ts
@@ -72,16 +72,24 @@ const testConfig = {
 describe("Daemon Observability", () => {
   let tempDir: string;
+  let origCacheDir: string | undefined;
   let servers: ReturnType[] = [];

   beforeEach(async () => {
     tempDir = await mkdtemp(join(tmpdir(), "mcp2cli-obs-test-"));
+    origCacheDir = process.env.MCP2CLI_CACHE_DIR;
+    process.env.MCP2CLI_CACHE_DIR = join(tempDir, "schemas");
     mockConnectToService.mockClear();
     mockCallTool.mockClear();
     mockClose.mockClear();
   });

   afterEach(async () => {
+    if (origCacheDir !== undefined) {
+      process.env.MCP2CLI_CACHE_DIR = origCacheDir;
+    } else {
+      delete process.env.MCP2CLI_CACHE_DIR;
+    }
     resetLogLevel();
     for (const s of servers) {
       try {
diff --git a/tests/daemon/pool-hardening.test.ts b/tests/daemon/pool-hardening.test.ts
index 6ff378a..b55c5bd 100644
--- a/tests/daemon/pool-hardening.test.ts
+++ b/tests/daemon/pool-hardening.test.ts
@@ -1,4 +1,7 @@
 import { describe, test, expect, beforeEach, mock, afterEach } from "bun:test";
+import { join } from "node:path";
+import { mkdtemp, rm } from "node:fs/promises";
+import { tmpdir } from "node:os";
 import type { McpConnection } from "../../src/connection/types.ts";
 import type { ServicesConfig } from "../../src/config/index.ts";
@@ -60,6 +63,25 @@ const singleConfig: ServicesConfig = {
   },
 };

+// -- Cache isolation: prevent tests from polluting real cache --
+let testDir: string;
+let origCacheDir: string | undefined;
+
+beforeEach(async () => {
+  testDir = await mkdtemp(join(tmpdir(), "mcp2cli-pool-hard-test-"));
+  origCacheDir = process.env.MCP2CLI_CACHE_DIR;
+  process.env.MCP2CLI_CACHE_DIR = join(testDir, "schemas");
+});
+
+afterEach(async () => {
+  if (origCacheDir !== undefined) {
+    process.env.MCP2CLI_CACHE_DIR = origCacheDir;
+  } else {
+    delete process.env.MCP2CLI_CACHE_DIR;
+  }
+  await rm(testDir, { recursive: true, force: true });
+});
+
 describe("ConnectionPool Hardening", () => {
   let pool: InstanceType<typeof ConnectionPool>;
   const originalEnv = process.env.MCP2CLI_POOL_MAX;
diff --git a/tests/daemon/pool.test.ts b/tests/daemon/pool.test.ts
index c6785ae..74102d3 100644
--- a/tests/daemon/pool.test.ts
+++ b/tests/daemon/pool.test.ts
@@ -1,4 +1,7 @@
-import { describe, test, expect, beforeEach, mock } from "bun:test";
+import { describe, test, expect, beforeEach, afterEach, mock } from "bun:test";
+import { join } from "node:path";
+import { mkdtemp, rm } from "node:fs/promises";
+import { tmpdir } from "node:os";
 import type { McpConnection } from "../../src/connection/types.ts";
 import type { ServicesConfig } from "../../src/config/index.ts";
@@ -19,6 +22,25 @@ mock.module("../../src/connection/index.ts", () => ({
 // Import pool AFTER mocking
 const { ConnectionPool } = await import("../../src/daemon/pool.ts");

+// -- Cache isolation: prevent tests from polluting real cache --
+let testDir: string;
+let origCacheDir: string | undefined;
+
+beforeEach(async () => {
+  testDir = await mkdtemp(join(tmpdir(), "mcp2cli-pool-test-"));
+  origCacheDir = process.env.MCP2CLI_CACHE_DIR;
+  process.env.MCP2CLI_CACHE_DIR = join(testDir, "schemas");
+});
+
+afterEach(async () => {
+  if (origCacheDir !== undefined) {
+    process.env.MCP2CLI_CACHE_DIR = origCacheDir;
+  } else {
+    delete process.env.MCP2CLI_CACHE_DIR;
+  }
+  await rm(testDir, {
recursive: true, force: true });
+});
+
 const testConfig: ServicesConfig = {
   services: {
     "test-svc": {
diff --git a/tests/daemon/timeout.test.ts b/tests/daemon/timeout.test.ts
index d300cae..6ca29f5 100644
--- a/tests/daemon/timeout.test.ts
+++ b/tests/daemon/timeout.test.ts
@@ -33,6 +33,7 @@ function runCli(
     MCP2CLI_PID_FILE: join(tempDir, "daemon.pid"),
     MCP2CLI_SOCKET_PATH: join(tempDir, "daemon.sock"),
     MCP2CLI_IDLE_TIMEOUT: "10",
+    MCP2CLI_CACHE_DIR: join(tempDir, "schemas"),
     ...extraEnv,
   };

From 1cd83dfd92cc03503b410d0bf1f32ba6e236374f Mon Sep 17 00:00:00 2001
From: Rodaddy
Date: Mon, 9 Mar 2026 19:19:23 -0400
Subject: [PATCH 3/6] feat: add WebSocket transport and batch command

WebSocket transport:
- New "websocket" backend type in services.json config
- Uses SDK's built-in WebSocketClientTransport (persistent connection)
- Fallback to stdio with circuit breaker (same as HTTP)
- Wired into daemon pool, schema, service-help, tool-call, generate-skills

Batch command:
- `mcp2cli batch` reads NDJSON tool calls from stdin
- Executes sequentially by default, --parallel for concurrent execution
- Outputs NDJSON results with service.tool correlation
- Individual errors don't abort the batch

703 tests, 0 failures, 0 TypeScript errors across 81 source files.

Co-Authored-By: Claude Opus 4.6
---
 README.md                                    |  49 ++++++
 examples/services-websocket.json             |  13 ++
 src/cli/commands/batch.ts                    | 180 +++++++++++++++++++
 src/cli/commands/generate-skills.ts          |   7 +-
 src/cli/commands/schema.ts                   |   8 +-
 src/cli/commands/service-help.ts             |   8 +-
 src/cli/commands/tool-call.ts                |   8 +-
 src/cli/help.ts                              |   7 +
 src/cli/index.ts                             |   2 +
 src/config/index.ts                          |   2 +
 src/config/schema.ts                         |  16 ++
 src/connection/index.ts                      |   1 +
 src/connection/websocket-transport.ts        |  68 +++++++
 src/daemon/pool.ts                           |  72 +++++++-
 tests/cli/batch.test.ts                      | 153 ++++++++++++++
 tests/connection/websocket-transport.test.ts | 164 +++++++++++++++++
 16 files changed, 745 insertions(+), 13 deletions(-)
 create mode 100644 examples/services-websocket.json
 create mode 100644 src/cli/commands/batch.ts
 create mode 100644 src/connection/websocket-transport.ts
 create mode 100644 tests/cli/batch.test.ts
 create mode 100644 tests/connection/websocket-transport.test.ts

diff --git a/README.md b/README.md
index 89522e3..5fa3612 100644
--- a/README.md
+++ b/README.md
@@ -308,6 +308,55 @@ mcp2cli grep "delete|remove"

 This searches cached schemas only -- no MCP connections are made.

+### WebSocket Transport
+
+Connect to MCP servers over WebSocket. Supports optional stdio fallback and access control, same as HTTP.
+
+```json
+{
+  "services": {
+    "remote-mcp": {
+      "description": "Remote MCP server via WebSocket",
+      "backend": "websocket",
+      "url": "ws://mcp-gateway.local:3000/mcp",
+      "fallback": {
+        "command": "npx",
+        "args": ["-y", "@anthropic/n8n-mcp"]
+      }
+    }
+  }
+}
+```
+
+WebSocket services benefit from the same circuit breaker and fallback behavior as HTTP services.
+
+### Batch Tool Calls
+
+Execute multiple tool calls in a single invocation by piping NDJSON to `mcp2cli batch`. Each line is a JSON object with `service`, `tool`, and `params` fields:
+
+```bash
+# Sequential execution (default)
+cat <<'EOF' | mcp2cli batch
+{"service":"n8n","tool":"n8n_list_workflows","params":{}}
+{"service":"n8n","tool":"n8n_get_workflow","params":{"id":"abc123"}}
+EOF
+
+# Parallel execution
+cat calls.ndjson | mcp2cli batch --parallel
+```
+
+Each result is one NDJSON line on stdout carrying `service`, `tool`, `success`, and either `result` or `error`. A failing call doesn't abort the rest of the batch.
+
diff --git a/examples/services-websocket.json b/examples/services-websocket.json
new file mode 100644
--- /dev/null
+++ b/examples/services-websocket.json
@@ -0,0 +1,13 @@
+{
+  "services": {
+    "remote-mcp": {
+      "description": "Remote MCP server via WebSocket",
+      "backend": "websocket",
+      "url": "ws://mcp-gateway.local:3000/mcp",
+      "fallback": {
+        "command": "npx",
+        "args": ["-y", "@anthropic/n8n-mcp"]
+      }
+    }
+  }
+}
diff --git a/src/cli/commands/batch.ts b/src/cli/commands/batch.ts
new file mode 100644
--- /dev/null
+++ b/src/cli/commands/batch.ts
@@ -0,0 +1,180 @@
+/**
+ * `mcp2cli batch` command.
+ * Reads NDJSON tool calls from stdin and executes them via the daemon.
+ */
+import { callViaDaemon } from "../../process/index.ts";
+import { createLogger } from "../../logger/index.ts";
+
+const log = createLogger("batch");
+
+/** A single tool call parsed from one NDJSON input line. */
+export interface BatchCallSpec {
+  service: string;
+  tool: string;
+  params: Record<string, unknown>;
+}
+
+/** A single result line written to stdout. */
+export interface BatchResult {
+  service: string;
+  tool: string;
+  success: boolean;
+  result?: unknown;
+  error?: { code: string; message: string };
+}
+
+/**
+ * Parse NDJSON lines into BatchCallSpec objects.
+ * Skips blank lines. Returns parse errors inline so they don't abort the batch.
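+ *
+ * Illustrative example (hypothetical values):
+ *   input line 1:  {"service":"n8n","tool":"n8n_list_workflows"}
+ *   parsed output: { spec: { service: "n8n", tool: "n8n_list_workflows", params: {} }, line: 1 }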
+ */ +export function parseBatchInput(input: string): Array<{ spec?: BatchCallSpec; error?: string; line: number }> { + const lines = input.split("\n"); + const results: Array<{ spec?: BatchCallSpec; error?: string; line: number }> = []; + + for (let i = 0; i < lines.length; i++) { + const trimmed = lines[i]!.trim(); + if (trimmed === "") continue; + + try { + const parsed = JSON.parse(trimmed); + if (!parsed.service || !parsed.tool) { + results.push({ error: `Missing required fields "service" and/or "tool"`, line: i + 1 }); + continue; + } + results.push({ + spec: { + service: parsed.service, + tool: parsed.tool, + params: parsed.params ?? {}, + }, + line: i + 1, + }); + } catch { + results.push({ error: `Invalid JSON on line ${i + 1}`, line: i + 1 }); + } + } + + return results; +} + +/** + * Execute a single batch call via the daemon. + * Returns a BatchResult -- never throws. + */ +async function executeSingle(spec: BatchCallSpec): Promise { + try { + const response = await callViaDaemon({ + service: spec.service, + tool: spec.tool, + params: spec.params, + }); + + if (response.success) { + return { + service: spec.service, + tool: spec.tool, + success: true, + result: response.result, + }; + } + + return { + service: spec.service, + tool: spec.tool, + success: false, + error: response.error, + }; + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + return { + service: spec.service, + tool: spec.tool, + success: false, + error: { code: "INTERNAL_ERROR", message }, + }; + } +} + +/** + * Execute batch calls sequentially, writing each result to stdout as NDJSON. + */ +async function executeSequential(specs: BatchCallSpec[]): Promise { + for (const spec of specs) { + const result = await executeSingle(spec); + console.log(JSON.stringify(result)); + } +} + +/** + * Execute batch calls in parallel, writing results to stdout as NDJSON. + * Order of output matches order of input. + */ +async function executeParallel(specs: BatchCallSpec[]): Promise { + const results = await Promise.all(specs.map(executeSingle)); + for (const result of results) { + console.log(JSON.stringify(result)); + } +} + +/** + * Read all stdin until EOF. Returns the full input as a string. + */ +async function readStdin(): Promise { + const chunks: Buffer[] = []; + for await (const chunk of process.stdin) { + chunks.push(chunk as Buffer); + } + return Buffer.concat(chunks).toString("utf-8"); +} + +/** + * Handle the `mcp2cli batch` command. + * Reads NDJSON from stdin, executes calls, outputs results as NDJSON. 
+ */ +export async function handleBatch(args: string[]): Promise { + const parallel = args.includes("--parallel"); + + log.info("batch_start", { parallel }); + + const input = await readStdin(); + + if (input.trim() === "") { + log.info("batch_empty_input"); + return; + } + + const parsed = parseBatchInput(input); + const specs: BatchCallSpec[] = []; + + // Emit parse errors as result lines, collect valid specs + for (const entry of parsed) { + if (entry.error) { + const errorResult: BatchResult = { + service: "unknown", + tool: "unknown", + success: false, + error: { code: "INPUT_VALIDATION_ERROR", message: entry.error }, + }; + console.log(JSON.stringify(errorResult)); + continue; + } + if (entry.spec) { + specs.push(entry.spec); + } + } + + if (specs.length === 0) { + log.info("batch_no_valid_specs"); + return; + } + + log.info("batch_executing", { count: specs.length, parallel }); + + if (parallel) { + await executeParallel(specs); + } else { + await executeSequential(specs); + } + + log.info("batch_complete", { count: specs.length }); +} diff --git a/src/cli/commands/generate-skills.ts b/src/cli/commands/generate-skills.ts index a705bbd..959285e 100644 --- a/src/cli/commands/generate-skills.ts +++ b/src/cli/commands/generate-skills.ts @@ -3,6 +3,7 @@ import { loadConfig, getConfigPath } from "../../config/index.ts"; import { ConfigError } from "../../config/errors.ts"; import { connectToService } from "../../connection/client.ts"; import { connectToHttpService } from "../../connection/http-transport.ts"; +import { connectToWebSocketService } from "../../connection/websocket-transport.ts"; import { listToolsForService, getToolSchema } from "../../schema/introspect.ts"; import { detectPrefixGroups, @@ -183,10 +184,12 @@ export const handleGenerateSkills = async (args: string[]): Promise => { return; } - // Connect to MCP server (stdio or http) + // Connect to MCP server (stdio, http, or websocket) const connection = service.backend === "http" ? await connectToHttpService(service) - : await connectToService(service); + : service.backend === "websocket" + ? await connectToWebSocketService(service) + : await connectToService(service); try { // List all tools diff --git a/src/cli/commands/schema.ts b/src/cli/commands/schema.ts index 0215305..f6ea2ae 100644 --- a/src/cli/commands/schema.ts +++ b/src/cli/commands/schema.ts @@ -5,7 +5,7 @@ * Supports --fresh flag to bypass cache for one call. */ import { loadConfig } from "../../config/index.ts"; -import { connectToService, connectToHttpService } from "../../connection/index.ts"; +import { connectToService, connectToHttpService, connectToWebSocketService } from "../../connection/index.ts"; import { getSchemaViaDaemon } from "../../process/index.ts"; import { parseDotNotation, @@ -141,10 +141,12 @@ export const handleSchema: CommandHandler = async (args: string[]) => { // Direct path (MCP2CLI_NO_DAEMON=1): legacy direct connection - // Connect and get schema (stdio or http) + // Connect and get schema (stdio, http, or websocket) const connection = service.backend === "http" ? await connectToHttpService(service) - : await connectToService(service); + : service.backend === "websocket" + ? 
await connectToWebSocketService(service) + : await connectToService(service); try { const result = await getToolSchema(connection.client, toolName, serviceName); diff --git a/src/cli/commands/service-help.ts b/src/cli/commands/service-help.ts index ffc089e..9d45846 100644 --- a/src/cli/commands/service-help.ts +++ b/src/cli/commands/service-help.ts @@ -3,7 +3,7 @@ * Routes through daemon by default. Set MCP2CLI_NO_DAEMON=1 for direct connection. */ import { loadConfig } from "../../config/index.ts"; -import { connectToService, connectToHttpService } from "../../connection/index.ts"; +import { connectToService, connectToHttpService, connectToWebSocketService } from "../../connection/index.ts"; import { listToolsViaDaemon } from "../../process/index.ts"; import { listToolsForService, formatToolListing } from "../../schema/index.ts"; import type { ToolListing, ToolSummary } from "../../schema/index.ts"; @@ -64,10 +64,12 @@ export async function handleServiceHelp( // Direct path (MCP2CLI_NO_DAEMON=1): legacy direct connection - // Connect and introspect (stdio or http) + // Connect and introspect (stdio, http, or websocket) const connection = service.backend === "http" ? await connectToHttpService(service) - : await connectToService(service); + : service.backend === "websocket" + ? await connectToWebSocketService(service) + : await connectToService(service); try { const allTools = await listToolsForService(connection.client); diff --git a/src/cli/commands/tool-call.ts b/src/cli/commands/tool-call.ts index eb592e0..bf1c0a9 100644 --- a/src/cli/commands/tool-call.ts +++ b/src/cli/commands/tool-call.ts @@ -7,7 +7,7 @@ import { } from "../../invocation/index.ts"; import { validationResultToCliError } from "../../validation/pipelines.ts"; import { loadConfig } from "../../config/index.ts"; -import { connectToService, connectToHttpService } from "../../connection/index.ts"; +import { connectToService, connectToHttpService, connectToWebSocketService } from "../../connection/index.ts"; import { callViaDaemon, getSchemaViaDaemon } from "../../process/index.ts"; import { getToolSchema } from "../../schema/introspect.ts"; import { checkToolAccess, extractPolicy } from "../../access/index.ts"; @@ -158,10 +158,12 @@ export async function handleToolCall(args: string[]): Promise { // Direct path (MCP2CLI_NO_DAEMON=1): legacy direct connection - // 4. Connect to MCP server (stdio or http) + // 4. Connect to MCP server (stdio, http, or websocket) const connection = service.backend === "http" ? await connectToHttpService(service) - : await connectToService(service); + : service.backend === "websocket" + ? 
await connectToWebSocketService(service) + : await connectToService(service); try { // Dry-run interception (inside try/finally so connection closes) diff --git a/src/cli/help.ts b/src/cli/help.ts index b3b00c6..e29ea83 100644 --- a/src/cli/help.ts +++ b/src/cli/help.ts @@ -67,6 +67,11 @@ export function printHelp(args?: string[]): void { description: "Search tool names and descriptions across cached services", usage: 'mcp2cli grep "pattern"', }, + { + name: "batch", + description: "Execute multiple tool calls from NDJSON stdin", + usage: "echo '{\"service\":\"n8n\",\"tool\":\"n8n_list_workflows\",\"params\":{}}' | mcp2cli batch [--parallel]", + }, ], examples: [ "mcp2cli services", @@ -74,6 +79,7 @@ export function printHelp(args?: string[]): void { "mcp2cli n8n n8n_list_workflows", "mcp2cli schema n8n.n8n_list_workflows", 'mcp2cli n8n n8n_create_workflow --params \'{"name": "test"}\'', + 'echo \'{"service":"n8n","tool":"n8n_list_workflows","params":{}}\' | mcp2cli batch', ], }), ); @@ -93,6 +99,7 @@ export function printHelp(args?: string[]): void { " generate-skills Generate skill files from service schemas", " cache Manage schema cache (clear, status)", ' grep Search tool names/descriptions across cached services', + " batch Execute multiple tool calls from NDJSON stdin", "", "EXAMPLES:", " mcp2cli services", diff --git a/src/cli/index.ts b/src/cli/index.ts index f258b4d..8663374 100644 --- a/src/cli/index.ts +++ b/src/cli/index.ts @@ -11,6 +11,7 @@ import { handleBootstrap } from "./commands/bootstrap.ts"; import { handleGenerateSkills } from "./commands/generate-skills.ts"; import { handleCache } from "./commands/cache.ts"; import { handleGrep } from "./commands/grep.ts"; +import { handleBatch } from "./commands/batch.ts"; import { ConfigError } from "../config/index.ts"; import { ConnectionError } from "../connection/index.ts"; import { ToolError } from "../invocation/errors.ts"; @@ -44,6 +45,7 @@ const COMMANDS: Record = { schema: handleSchema, cache: handleCache, grep: handleGrep, + batch: handleBatch, daemon: handleDaemonDispatch, bootstrap: handleBootstrap, "generate-skills": handleGenerateSkills, diff --git a/src/config/index.ts b/src/config/index.ts index 3c1345c..e3e23ba 100644 --- a/src/config/index.ts +++ b/src/config/index.ts @@ -2,6 +2,7 @@ export { StdioServiceSchema, StdioFallbackSchema, HttpServiceSchema, + WebSocketServiceSchema, ServiceSchema, ServicesConfigSchema, } from "./schema.ts"; @@ -10,6 +11,7 @@ export type { StdioService, StdioFallback, HttpService, + WebSocketService, ServiceConfig, ServicesConfig, } from "./schema.ts"; diff --git a/src/config/schema.ts b/src/config/schema.ts index 79bdb96..a2f36b1 100644 --- a/src/config/schema.ts +++ b/src/config/schema.ts @@ -48,6 +48,20 @@ export const HttpServiceSchema = z.object({ ...accessControlFields, }); +/** + * WebSocket-based MCP service configuration. + * Connects to a remote MCP server over WebSocket. + * Optional fallback launches a local stdio process when the server is unreachable. + */ +export const WebSocketServiceSchema = z.object({ + description: z.string().optional(), + backend: z.literal("websocket"), + url: z.string().url(), + headers: z.record(z.string(), z.string()).optional().default({}), + fallback: StdioFallbackSchema.optional(), + ...accessControlFields, +}); + /** * Discriminated union of all supported service backends. * The "backend" field determines which schema variant applies. 
@@ -55,6 +69,7 @@ export const HttpServiceSchema = z.object({
 export const ServiceSchema = z.discriminatedUnion("backend", [
   StdioServiceSchema,
   HttpServiceSchema,
+  WebSocketServiceSchema,
 ]);

 /**
@@ -73,5 +88,6 @@ export const ServicesConfigSchema = z.object({
 export type StdioService = z.infer<typeof StdioServiceSchema>;
 export type StdioFallback = z.infer<typeof StdioFallbackSchema>;
 export type HttpService = z.infer<typeof HttpServiceSchema>;
+export type WebSocketService = z.infer<typeof WebSocketServiceSchema>;
 export type ServiceConfig = z.infer<typeof ServiceSchema>;
 export type ServicesConfig = z.infer<typeof ServicesConfigSchema>;
diff --git a/src/connection/index.ts b/src/connection/index.ts
index 68008c2..2025341 100644
--- a/src/connection/index.ts
+++ b/src/connection/index.ts
@@ -3,4 +3,5 @@ export { isJsonRpcLine } from "./filter.ts";
 export { McpTransport } from "./transport.ts";
 export { connectToService } from "./client.ts";
 export { connectToHttpService } from "./http-transport.ts";
+export { connectToWebSocketService } from "./websocket-transport.ts";
 export type { ConnectionOptions, McpConnection } from "./types.ts";
diff --git a/src/connection/websocket-transport.ts b/src/connection/websocket-transport.ts
new file mode 100644
index 0000000..01551c2
--- /dev/null
+++ b/src/connection/websocket-transport.ts
@@ -0,0 +1,68 @@
+/**
+ * WebSocket transport for connecting to remote MCP servers.
+ * Uses the SDK's built-in WebSocketClientTransport.
+ */
+import { Client } from "@modelcontextprotocol/sdk/client/index.js";
+import { WebSocketClientTransport } from "@modelcontextprotocol/sdk/client/websocket.js";
+import { ConnectionError } from "./errors.ts";
+import type { McpConnection } from "./types.ts";
+import type { WebSocketService } from "../config/schema.ts";
+import { createLogger } from "../logger/index.ts";
+import pkg from "../../package.json" with { type: "json" };
+
+const log = createLogger("websocket-transport");
+
+/**
+ * Connect to an MCP server via WebSocket transport.
+ * Wraps the SDK's WebSocketClientTransport with timeout and error handling.
+ */
+export async function connectToWebSocketService(
+  service: WebSocketService,
+  options?: { timeout?: number },
+): Promise<McpConnection> {
+  const timeout = options?.timeout ?? 30000;
+  const url = new URL(service.url);
+
+  try {
+    log.info("connecting_websocket", { url: service.url });
+    const transport = new WebSocketClientTransport(url);
+    const client = new Client({ name: "mcp2cli", version: pkg.version });
+
+    await Promise.race([
+      client.connect(transport),
+      rejectAfter(timeout, service.url),
+    ]);
+
+    log.info("connected_websocket", { url: service.url });
+    return {
+      client,
+      close: async () => {
+        await client.close();
+      },
+    };
+  } catch (err) {
+    if (err instanceof ConnectionError) throw err;
+    const message = err instanceof Error ? err.message : String(err);
+    log.error("websocket_connect_failed", { url: service.url, error: message });
+    throw new ConnectionError(
+      `Failed to connect to WebSocket MCP server: ${message}`,
+      `url: ${service.url}`,
+    );
+  }
+}
+
+/** Create a timeout rejection promise. */
+function rejectAfter(ms: number, url: string): Promise<never> {
+  return new Promise<never>((_, reject) =>
+    setTimeout(
+      () =>
+        reject(
+          new ConnectionError(
+            `WebSocket connection timed out after ${ms}ms`,
+            `url: ${url}`,
+          ),
+        ),
+      ms,
+    ),
+  );
+}
diff --git a/src/daemon/pool.ts b/src/daemon/pool.ts
index 9450784..c42d2db 100644
--- a/src/daemon/pool.ts
+++ b/src/daemon/pool.ts
@@ -4,10 +4,10 @@
 * MEM-04: Bounded pool size (default 50, configurable via MCP2CLI_POOL_MAX).
 * MEM-05: Health check before reuse -- stale connections are replaced.
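+ * WebSocket services go through the same circuit-breaker and stdio-fallback
+ * path as HTTP services (see connectWebSocketWithFallback below).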
 */
-import { connectToService, connectToHttpService } from "../connection/index.ts";
+import { connectToService, connectToHttpService, connectToWebSocketService } from "../connection/index.ts";
 import { ConnectionError } from "../connection/errors.ts";
 import type { McpConnection } from "../connection/types.ts";
-import type { ServicesConfig, HttpService } from "../config/index.ts";
+import type { ServicesConfig, HttpService, WebSocketService } from "../config/index.ts";
 import { createLogger } from "../logger/index.ts";
 import { checkDriftOnConnect } from "./drift-hook.ts";
 import { extractPolicy } from "../access/filter.ts";
@@ -102,6 +102,8 @@ export class ConnectionPool {
     let connectFn: () => Promise<McpConnection>;
     if (serviceConfig.backend === "http") {
       connectFn = () => this.connectHttpWithFallback(serviceName, serviceConfig);
+    } else if (serviceConfig.backend === "websocket") {
+      connectFn = () => this.connectWebSocketWithFallback(serviceName, serviceConfig);
     } else if (serviceConfig.backend === "stdio") {
       connectFn = () => connectToService(serviceConfig);
     } else {
@@ -165,6 +167,72 @@ export class ConnectionPool {
     return Array.from(this.connections.keys());
   }

+  /**
+   * Connect to a WebSocket service with circuit breaker and stdio fallback.
+   * Mirrors the HTTP fallback pattern for consistency.
+   */
+  private async connectWebSocketWithFallback(
+    serviceName: string,
+    serviceConfig: WebSocketService,
+  ): Promise<McpConnection> {
+    const hasFallback = !!serviceConfig.fallback;
+    const attemptWs = await shouldAttemptHttp(serviceName);
+
+    if (!attemptWs) {
+      if (hasFallback) {
+        log.warn("fallback_circuit_open", {
+          service: serviceName,
+          url: serviceConfig.url,
+        });
+        return this.connectWsFallback(serviceName, serviceConfig);
+      }
+      throw new ConnectionError(
+        `Circuit breaker open for ${serviceName} and no fallback configured`,
+        `url: ${serviceConfig.url}`,
+      );
+    }
+
+    try {
+      const connection = await connectToWebSocketService(serviceConfig);
+      await recordSuccess(serviceName);
+      return connection;
+    } catch (err) {
+      await recordFailure(serviceName);
+      const message = err instanceof Error ? err.message : String(err);
+
+      if (hasFallback) {
+        log.warn("fallback_ws_failed", {
+          service: serviceName,
+          url: serviceConfig.url,
+          error: message,
+        });
+        return this.connectWsFallback(serviceName, serviceConfig);
+      }
+
+      throw err;
+    }
+  }
+
+  /**
+   * Connect via the stdio fallback defined in a WebSocket service config.
+   */
+  private async connectWsFallback(
+    serviceName: string,
+    serviceConfig: WebSocketService,
+  ): Promise<McpConnection> {
+    const fb = serviceConfig.fallback!;
+    log.warn("using_stdio_fallback", {
+      service: serviceName,
+      command: fb.command,
+    });
+    return connectToService({
+      backend: "stdio" as const,
+      command: fb.command,
+      args: fb.args,
+      env: fb.env,
+    });
+  }
+
   /**
    * INFRA-01/02: Connect to an HTTP service with circuit breaker and stdio fallback.
    * 1. If circuit is open, skip HTTP and go directly to fallback.
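The decision order implemented by `connectWebSocketWithFallback` (and its HTTP counterpart) reduces to the sketch below -- simplified names rather than the actual pool API:

```ts
// Sketch only: the real methods also log and persist circuit-breaker state.
type Connect<T> = () => Promise<T>;

async function withFallback<T>(
  circuitOpen: boolean,   // i.e. shouldAttemptHttp(serviceName) returned false
  primary: Connect<T>,    // e.g. connectToWebSocketService(serviceConfig)
  fallback?: Connect<T>,  // stdio fallback from services.json, if configured
): Promise<T> {
  if (circuitOpen) {
    if (fallback) return fallback();  // skip the remote attempt entirely
    throw new Error("circuit open and no fallback configured");
  }
  try {
    return await primary();           // success is recorded upstream
  } catch (err) {
    if (fallback) return fallback();  // degrade to a local stdio process
    throw err;                        // no fallback: surface the error
  }
}
```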
diff --git a/tests/cli/batch.test.ts b/tests/cli/batch.test.ts new file mode 100644 index 0000000..6107886 --- /dev/null +++ b/tests/cli/batch.test.ts @@ -0,0 +1,153 @@ +import { describe, test, expect } from "bun:test"; +import { parseBatchInput } from "../../src/cli/commands/batch.ts"; +import type { BatchResult } from "../../src/cli/commands/batch.ts"; + +describe("parseBatchInput", () => { + test("parses valid NDJSON lines", () => { + const input = [ + '{"service": "n8n", "tool": "n8n_list_workflows", "params": {}}', + '{"service": "n8n", "tool": "n8n_get_workflow", "params": {"id": "1"}}', + ].join("\n"); + + const results = parseBatchInput(input); + expect(results).toHaveLength(2); + expect(results[0]!.spec).toEqual({ + service: "n8n", + tool: "n8n_list_workflows", + params: {}, + }); + expect(results[1]!.spec).toEqual({ + service: "n8n", + tool: "n8n_get_workflow", + params: { id: "1" }, + }); + }); + + test("skips blank lines", () => { + const input = [ + '{"service": "n8n", "tool": "n8n_list_workflows", "params": {}}', + "", + " ", + '{"service": "n8n", "tool": "n8n_get_workflow", "params": {}}', + ].join("\n"); + + const results = parseBatchInput(input); + expect(results).toHaveLength(2); + expect(results[0]!.spec!.tool).toBe("n8n_list_workflows"); + expect(results[1]!.spec!.tool).toBe("n8n_get_workflow"); + }); + + test("handles empty input", () => { + const results = parseBatchInput(""); + expect(results).toHaveLength(0); + }); + + test("handles whitespace-only input", () => { + const results = parseBatchInput(" \n \n "); + expect(results).toHaveLength(0); + }); + + test("reports invalid JSON lines as errors", () => { + const input = [ + '{"service": "n8n", "tool": "n8n_list_workflows", "params": {}}', + "not valid json", + '{"service": "n8n", "tool": "n8n_get_workflow", "params": {}}', + ].join("\n"); + + const results = parseBatchInput(input); + expect(results).toHaveLength(3); + expect(results[0]!.spec).toBeDefined(); + expect(results[1]!.error).toContain("Invalid JSON"); + expect(results[1]!.line).toBe(2); + expect(results[2]!.spec).toBeDefined(); + }); + + test("reports missing service field as error", () => { + const input = '{"tool": "n8n_list_workflows", "params": {}}'; + const results = parseBatchInput(input); + expect(results).toHaveLength(1); + expect(results[0]!.error).toContain("Missing required fields"); + }); + + test("reports missing tool field as error", () => { + const input = '{"service": "n8n", "params": {}}'; + const results = parseBatchInput(input); + expect(results).toHaveLength(1); + expect(results[0]!.error).toContain("Missing required fields"); + }); + + test("defaults params to empty object when missing", () => { + const input = '{"service": "n8n", "tool": "n8n_list_workflows"}'; + const results = parseBatchInput(input); + expect(results).toHaveLength(1); + expect(results[0]!.spec!.params).toEqual({}); + }); + + test("preserves line numbers for error reporting", () => { + const input = [ + '{"service": "n8n", "tool": "tool1", "params": {}}', + "", + "bad json", + '{"service": "n8n", "tool": "tool2", "params": {}}', + ].join("\n"); + + const results = parseBatchInput(input); + expect(results).toHaveLength(3); + expect(results[0]!.line).toBe(1); + // Line 2 is blank, skipped + expect(results[1]!.line).toBe(3); + expect(results[1]!.error).toBeDefined(); + expect(results[2]!.line).toBe(4); + }); + + test("handles mixed valid and invalid lines without aborting", () => { + const input = [ + '{"service": "a", "tool": "t1", "params": {}}', + "garbage", + 
'{"service": "b", "tool": "t2", "params": {"x": 1}}', + '{"no_service": true}', + '{"service": "c", "tool": "t3", "params": {}}', + ].join("\n"); + + const results = parseBatchInput(input); + expect(results).toHaveLength(5); + + // Valid entries + const validSpecs = results.filter((r) => r.spec).map((r) => r.spec!); + expect(validSpecs).toHaveLength(3); + expect(validSpecs[0]!.service).toBe("a"); + expect(validSpecs[1]!.service).toBe("b"); + expect(validSpecs[2]!.service).toBe("c"); + + // Error entries + const errors = results.filter((r) => r.error); + expect(errors).toHaveLength(2); + }); +}); + +describe("BatchResult type conformance", () => { + test("success result has expected shape", () => { + const result: BatchResult = { + service: "n8n", + tool: "n8n_list_workflows", + success: true, + result: { workflows: [] }, + }; + expect(result.success).toBe(true); + expect(result.service).toBe("n8n"); + expect(result.tool).toBe("n8n_list_workflows"); + expect(result.result).toEqual({ workflows: [] }); + }); + + test("error result has expected shape", () => { + const result: BatchResult = { + service: "n8n", + tool: "n8n_get_workflow", + success: false, + error: { code: "TOOL_ERROR", message: "Not found" }, + }; + expect(result.success).toBe(false); + expect(result.error).toBeDefined(); + expect(result.error!.code).toBe("TOOL_ERROR"); + }); +}); diff --git a/tests/connection/websocket-transport.test.ts b/tests/connection/websocket-transport.test.ts new file mode 100644 index 0000000..da88d28 --- /dev/null +++ b/tests/connection/websocket-transport.test.ts @@ -0,0 +1,164 @@ +import { describe, test, expect } from "bun:test"; +import { + WebSocketServiceSchema, + ServiceSchema, + ServicesConfigSchema, +} from "../../src/config/schema.ts"; + +describe("WebSocketService config validation", () => { + test("valid WebSocket service with url only", () => { + const result = WebSocketServiceSchema.safeParse({ + backend: "websocket", + url: "ws://mcp-gateway.local:3000/mcp", + }); + expect(result.success).toBe(true); + if (result.success) { + expect(result.data.backend).toBe("websocket"); + expect(result.data.url).toBe("ws://mcp-gateway.local:3000/mcp"); + expect(result.data.headers).toEqual({}); + } + }); + + test("valid WebSocket service with headers", () => { + const result = WebSocketServiceSchema.safeParse({ + backend: "websocket", + url: "wss://mcp-gateway.local:3000/mcp", + headers: { Authorization: "Bearer tok_123" }, + }); + expect(result.success).toBe(true); + if (result.success) { + expect(result.data.headers).toEqual({ Authorization: "Bearer tok_123" }); + } + }); + + test("valid WebSocket service with description and fallback", () => { + const result = WebSocketServiceSchema.safeParse({ + backend: "websocket", + url: "ws://localhost:3000/mcp", + description: "Remote MCP via WebSocket", + fallback: { + command: "npx", + args: ["-y", "@anthropic/n8n-mcp"], + }, + }); + expect(result.success).toBe(true); + if (result.success) { + expect(result.data.description).toBe("Remote MCP via WebSocket"); + expect(result.data.fallback).toBeDefined(); + expect(result.data.fallback!.command).toBe("npx"); + } + }); + + test("missing url fails", () => { + const result = WebSocketServiceSchema.safeParse({ + backend: "websocket", + }); + expect(result.success).toBe(false); + }); + + test("invalid url fails", () => { + const result = WebSocketServiceSchema.safeParse({ + backend: "websocket", + url: "not-a-valid-url", + }); + expect(result.success).toBe(false); + }); + + test("empty string url fails", () => { + 
const result = WebSocketServiceSchema.safeParse({ + backend: "websocket", + url: "", + }); + expect(result.success).toBe(false); + }); + + test("access control fields are accepted", () => { + const result = WebSocketServiceSchema.safeParse({ + backend: "websocket", + url: "ws://localhost:3000/mcp", + allowTools: ["tool_*"], + blockTools: ["tool_delete_*"], + }); + expect(result.success).toBe(true); + if (result.success) { + expect(result.data.allowTools).toEqual(["tool_*"]); + expect(result.data.blockTools).toEqual(["tool_delete_*"]); + } + }); +}); + +describe("ServiceSchema accepts websocket backend", () => { + test("websocket service via discriminated union", () => { + const result = ServiceSchema.safeParse({ + backend: "websocket", + url: "ws://localhost:3000/mcp", + }); + expect(result.success).toBe(true); + if (result.success) { + expect(result.data.backend).toBe("websocket"); + } + }); +}); + +describe("ServicesConfig with websocket backend", () => { + test("config with stdio + http + websocket services", () => { + const result = ServicesConfigSchema.safeParse({ + services: { + n8n: { + backend: "stdio", + command: "npx", + args: ["-y", "n8n-mcp"], + }, + vault: { + backend: "http", + url: "http://10.71.20.14:3001/mcp", + }, + remote: { + backend: "websocket", + url: "ws://mcp-gateway.local:3000/mcp", + }, + }, + }); + expect(result.success).toBe(true); + if (result.success) { + expect(Object.keys(result.data.services)).toHaveLength(3); + const remote = result.data.services.remote; + expect(remote).toBeDefined(); + expect(remote!.backend).toBe("websocket"); + if (remote!.backend === "websocket") { + expect(remote!.url).toBe("ws://mcp-gateway.local:3000/mcp"); + } + } + }); + + test("websocket-only config is valid", () => { + const result = ServicesConfigSchema.safeParse({ + services: { + remote: { + backend: "websocket", + url: "ws://localhost:3000/mcp", + }, + }, + }); + expect(result.success).toBe(true); + }); +}); + +describe("WebSocket transport module imports", () => { + test("connectToWebSocketService is exported from connection index", async () => { + const mod = await import("../../src/connection/index.ts"); + expect(typeof mod.connectToWebSocketService).toBe("function"); + }); + + test("connectToWebSocketService function exists in websocket-transport module", async () => { + const mod = await import("../../src/connection/websocket-transport.ts"); + expect(typeof mod.connectToWebSocketService).toBe("function"); + }); +}); + +describe("WebSocket config type export", () => { + test("WebSocketServiceSchema is exported from config index", async () => { + const mod = await import("../../src/config/index.ts"); + expect(mod.WebSocketServiceSchema).toBeDefined(); + }); +}); From 61de61f5ae1e1e5e3f25534a60d3dcdd12d19550 Mon Sep 17 00:00:00 2001 From: Rodaddy Date: Mon, 9 Mar 2026 19:26:48 -0400 Subject: [PATCH 4/6] fix: use direct imports for WebSocket transport to avoid CI module resolution failure The barrel re-export from connection/index.ts caused a cascading module evaluation failure in CI when the SDK's WebSocket module couldn't load. Direct imports from websocket-transport.ts avoid this -- callers only load the module when a websocket backend is actually configured. 
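Rough sketch of the difference (hypothetical consumer; paths as in this repo):

```ts
// Before: importing ANY symbol through the barrel evaluates
// websocket-transport.ts, so one bad transitive import broke
// every consumer of connection/index.ts.
import { connectToService } from "../../connection/index.ts";

// After: only modules that actually reference the WebSocket
// transport import -- and therefore evaluate -- that file.
import { connectToWebSocketService } from "../../connection/websocket-transport.ts";
```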
Co-Authored-By: Claude Opus 4.6 --- src/cli/commands/schema.ts | 3 ++- src/cli/commands/service-help.ts | 3 ++- src/cli/commands/tool-call.ts | 3 ++- src/connection/index.ts | 1 - src/daemon/pool.ts | 3 ++- tests/connection/websocket-transport.test.ts | 5 ----- 6 files changed, 8 insertions(+), 10 deletions(-) diff --git a/src/cli/commands/schema.ts b/src/cli/commands/schema.ts index f6ea2ae..6d0ff81 100644 --- a/src/cli/commands/schema.ts +++ b/src/cli/commands/schema.ts @@ -5,7 +5,8 @@ * Supports --fresh flag to bypass cache for one call. */ import { loadConfig } from "../../config/index.ts"; -import { connectToService, connectToHttpService, connectToWebSocketService } from "../../connection/index.ts"; +import { connectToService, connectToHttpService } from "../../connection/index.ts"; +import { connectToWebSocketService } from "../../connection/websocket-transport.ts"; import { getSchemaViaDaemon } from "../../process/index.ts"; import { parseDotNotation, diff --git a/src/cli/commands/service-help.ts b/src/cli/commands/service-help.ts index 9d45846..12605fd 100644 --- a/src/cli/commands/service-help.ts +++ b/src/cli/commands/service-help.ts @@ -3,7 +3,8 @@ * Routes through daemon by default. Set MCP2CLI_NO_DAEMON=1 for direct connection. */ import { loadConfig } from "../../config/index.ts"; -import { connectToService, connectToHttpService, connectToWebSocketService } from "../../connection/index.ts"; +import { connectToService, connectToHttpService } from "../../connection/index.ts"; +import { connectToWebSocketService } from "../../connection/websocket-transport.ts"; import { listToolsViaDaemon } from "../../process/index.ts"; import { listToolsForService, formatToolListing } from "../../schema/index.ts"; import type { ToolListing, ToolSummary } from "../../schema/index.ts"; diff --git a/src/cli/commands/tool-call.ts b/src/cli/commands/tool-call.ts index bf1c0a9..c806624 100644 --- a/src/cli/commands/tool-call.ts +++ b/src/cli/commands/tool-call.ts @@ -7,7 +7,8 @@ import { } from "../../invocation/index.ts"; import { validationResultToCliError } from "../../validation/pipelines.ts"; import { loadConfig } from "../../config/index.ts"; -import { connectToService, connectToHttpService, connectToWebSocketService } from "../../connection/index.ts"; +import { connectToService, connectToHttpService } from "../../connection/index.ts"; +import { connectToWebSocketService } from "../../connection/websocket-transport.ts"; import { callViaDaemon, getSchemaViaDaemon } from "../../process/index.ts"; import { getToolSchema } from "../../schema/introspect.ts"; import { checkToolAccess, extractPolicy } from "../../access/index.ts"; diff --git a/src/connection/index.ts b/src/connection/index.ts index 2025341..68008c2 100644 --- a/src/connection/index.ts +++ b/src/connection/index.ts @@ -3,5 +3,4 @@ export { isJsonRpcLine } from "./filter.ts"; export { McpTransport } from "./transport.ts"; export { connectToService } from "./client.ts"; export { connectToHttpService } from "./http-transport.ts"; -export { connectToWebSocketService } from "./websocket-transport.ts"; export type { ConnectionOptions, McpConnection } from "./types.ts"; diff --git a/src/daemon/pool.ts b/src/daemon/pool.ts index c42d2db..c276dd2 100644 --- a/src/daemon/pool.ts +++ b/src/daemon/pool.ts @@ -4,7 +4,8 @@ * MEM-04: Bounded pool size (default 50, configurable via MCP2CLI_POOL_MAX). * MEM-05: Health check before reuse -- stale connections are replaced. 
*/ -import { connectToService, connectToHttpService, connectToWebSocketService } from "../connection/index.ts"; +import { connectToService, connectToHttpService } from "../connection/index.ts"; +import { connectToWebSocketService } from "../connection/websocket-transport.ts"; import { ConnectionError } from "../connection/errors.ts"; import type { McpConnection } from "../connection/types.ts"; import type { ServicesConfig, HttpService, WebSocketService } from "../config/index.ts"; diff --git a/tests/connection/websocket-transport.test.ts b/tests/connection/websocket-transport.test.ts index da88d28..dc22ea8 100644 --- a/tests/connection/websocket-transport.test.ts +++ b/tests/connection/websocket-transport.test.ts @@ -145,11 +145,6 @@ describe("ServicesConfig with websocket backend", () => { }); describe("WebSocket transport module imports", () => { - test("connectToWebSocketService is exported from connection index", async () => { - const mod = await import("../../src/connection/index.ts"); - expect(typeof mod.connectToWebSocketService).toBe("function"); - }); - test("connectToWebSocketService function exists in websocket-transport module", async () => { const mod = await import("../../src/connection/websocket-transport.ts"); expect(typeof mod.connectToWebSocketService).toBe("function"); From af0ed4663112621e052d18cc9feb8b53e6ea3bd9 Mon Sep 17 00:00:00 2001 From: Rodaddy Date: Mon, 9 Mar 2026 20:03:25 -0400 Subject: [PATCH 5/6] fix(ci): use explicit LiteLLM model aliases in claude-code-review workflow The claude-code-action expands 'sonnet' to 'claude-sonnet-4-6' internally, which isn't a registered alias in our LiteLLM proxy. Using 'sonnet4.6' and 'opus4.6' directly matches the LiteLLM model registry. Co-Authored-By: Claude Opus 4.6 --- .github/workflows/claude-code-review.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/claude-code-review.yml b/.github/workflows/claude-code-review.yml index 7cba919..0be8bca 100644 --- a/.github/workflows/claude-code-review.yml +++ b/.github/workflows/claude-code-review.yml @@ -19,9 +19,9 @@ jobs: env: ANTHROPIC_BASE_URL: http://10.71.20.53:4000 ANTHROPIC_API_KEY: ${{ secrets.LITELLM_API_KEY }} - ANTHROPIC_MODEL: sonnet - ANTHROPIC_DEFAULT_SONNET_MODEL: sonnet - ANTHROPIC_DEFAULT_OPUS_MODEL: opus + ANTHROPIC_MODEL: sonnet4.6 + ANTHROPIC_DEFAULT_SONNET_MODEL: sonnet4.6 + ANTHROPIC_DEFAULT_OPUS_MODEL: opus4.6 ANTHROPIC_DEFAULT_HAIKU_MODEL: haiku steps: - name: Checkout @@ -34,7 +34,7 @@ jobs: with: anthropic_api_key: ${{ secrets.LITELLM_API_KEY }} github_token: ${{ secrets.GITHUB_TOKEN }} - model: "sonnet" + model: "sonnet4.6" direct_prompt: | Review this PR for the mcp2cli project (CLI bridge for MCP servers). Focus on: 1. Bugs and logic errors From f9add9c84ea30cc838d046cd8989e4b4cc9b78cd Mon Sep 17 00:00:00 2001 From: Rodaddy Date: Mon, 9 Mar 2026 20:24:46 -0400 Subject: [PATCH 6/6] ci: retrigger PR checks after runner migration to dedicated LXC 106 Co-Authored-By: Claude Opus 4.6