From 2eb8c7a70a4abcd429859b09eaf3ec0a485512a1 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 9 Apr 2026 15:58:26 +0000 Subject: [PATCH 1/5] Initial plan From 18a825a3e6ebf089d3d42f5d4c6d5dcb9bc65ad6 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 9 Apr 2026 16:47:05 +0000 Subject: [PATCH 2/5] 0.91.0 Co-authored-by: jsilvanus <22452468+jsilvanus@users.noreply.github.com> --- package-lock.json | 4 ++-- package.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index 2ffbe79..58244c2 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "gitsema", - "version": "0.90.0", + "version": "0.91.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "gitsema", - "version": "0.90.0", + "version": "0.91.0", "license": "ISC", "dependencies": { "@asteasolutions/zod-to-openapi": "8.5.0", diff --git a/package.json b/package.json index 00c5900..4b2606d 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "gitsema", - "version": "0.90.0", + "version": "0.91.0", "description": "A content-addressed semantic index synchronized with Git's object model.", "type": "module", "main": "dist/cli/index.js", From aaae5231bf617cb9237f37734689716ca2dd5f75 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 9 Apr 2026 16:47:58 +0000 Subject: [PATCH 3/5] feat: add LLM narrator/explainer via chattydeer (phase 91, schema v22) Agent-Logs-Url: https://github.com/jsilvanus/gitsema/sessions/4539623d-376d-41b2-99f6-72a14cfe3a9e Co-authored-by: jsilvanus <22452468+jsilvanus@users.noreply.github.com> --- CLAUDE.md | 8 +- docs/PLAN.md | 61 +++++ docs/plan_LLM.md | 232 ++++++++++++++++++ package-lock.json | 37 ++- package.json | 3 +- src/cli/commands/models.ts | 154 ++++++++++++ 
src/cli/commands/narrate.ts | 182 +++++++++++++++ src/cli/register/all.ts | 4 + src/cli/register/setup.ts | 45 ++++ src/core/db/schema.ts | 22 ++ src/core/db/sqlite.ts | 37 ++- src/core/narrator/audit.ts | 86 +++++++ src/core/narrator/chattydeerProvider.ts | 211 +++++++++++++++++ src/core/narrator/index.ts | 12 + src/core/narrator/narrator.ts | 298 ++++++++++++++++++++++++ src/core/narrator/redact.ts | 130 +++++++++++ src/core/narrator/resolveNarrator.ts | 205 ++++++++++++++++ src/core/narrator/types.ts | 126 ++++++++++ src/mcp/server.ts | 4 + src/mcp/tools/narrator.ts | 118 ++++++++++ src/server/app.ts | 4 + src/server/routes/narrator.ts | 121 ++++++++++ tests/narratorConfig.test.ts | 196 ++++++++++++++++ tests/narratorRedact.test.ts | 158 +++++++++++++ tests/narratorSmoke.test.ts | 133 +++++++++++ yarn.lock | 23 +- 26 files changed, 2595 insertions(+), 15 deletions(-) create mode 100644 docs/plan_LLM.md create mode 100644 src/cli/commands/narrate.ts create mode 100644 src/core/narrator/audit.ts create mode 100644 src/core/narrator/chattydeerProvider.ts create mode 100644 src/core/narrator/index.ts create mode 100644 src/core/narrator/narrator.ts create mode 100644 src/core/narrator/redact.ts create mode 100644 src/core/narrator/resolveNarrator.ts create mode 100644 src/core/narrator/types.ts create mode 100644 src/mcp/tools/narrator.ts create mode 100644 src/server/routes/narrator.ts create mode 100644 tests/narratorConfig.test.ts create mode 100644 tests/narratorRedact.test.ts create mode 100644 tests/narratorSmoke.test.ts diff --git a/CLAUDE.md b/CLAUDE.md index e4939f0..5f98a4f 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -282,7 +282,7 @@ gitsema index - **ORM:** Drizzle ORM (`src/core/db/schema.ts`) - **Add to `.gitignore`:** `.gitsema/` -**Schema overview (current schema v21):** +**Schema overview (current schema v22):** | Table | Purpose | |---|---| @@ -304,8 +304,9 @@ gitsema index | `blob_clusters` | K-means cluster assignments | | `cluster_assignments` | 
Cluster snapshot entries per ref | | `module_embeddings` | Directory centroid running-mean embeddings (Phase 33) | -| `embed_config` | Recorded embedding provenance (model, dimensions, chunker) | +| `embed_config` | Recorded embedding provenance (model, dimensions, chunker); `kind` column distinguishes `'embedding'` vs `'narrator'` configs | | `indexing_checkpoints` | Resume markers for interrupted indexing runs | +| `settings` | Key-value table for persistent settings (e.g. `active_narrator_model_config_id`) | **FTS5 note:** Blobs indexed before Phase 11 have no FTS5 content. `--hybrid` search only applies to blobs with FTS5 entries. `--include-content` in evolution dumps also depends on FTS5 content. Use `gitsema backfill-fts` to populate FTS5 content for older index entries. @@ -320,7 +321,8 @@ gitsema index - v18 → v19: Added `embed_config` table for embedding provenance (Phase 80+) - v19 → v20: Added `UNIQUE (blob_hash, path)` index on `paths` table (review6 §11.6 / Phase 89) - v20 → v21: Hashed repo tokens at rest — `token_hash` + `token_prefix` replace plaintext `token` in `repo_tokens` (review7 §4.1) -- **Current version: 21** +- v21 → v22: Added `kind` + `params_json` columns to `embed_config`; added `settings` table (narrator model config) +- **Current version: 22** Schema changes require updating both `src/core/db/schema.ts` and the migration logic in `src/core/db/sqlite.ts`. diff --git a/docs/PLAN.md b/docs/PLAN.md index bb4b4ec..eef8bb5 100644 --- a/docs/PLAN.md +++ b/docs/PLAN.md @@ -3114,3 +3114,64 @@ embedding provider (Ollama, OpenAI-compatible HTTP, embedeer). This enables: **Documentation:** `CLAUDE.md` schema overview updated to v21 + migration v20→v21 entry added. `docs/deploy.md` table of contents updated with §11. **Status:** ✅ complete. + +--- + +## Phase 91 — LLM Narrator/Explainer via chattydeer (DB-backed config) + +**Status:** ✅ complete. + +**Goals:** +- Add LLM-powered `gitsema narrate` and `gitsema explain` commands. 
+- Use `@jsilvanus/chattydeer` (split-out from embedeer) for LLM narration. +- Store narrator model configs in the DB (embed_config table, `kind='narrator'`). +- Manage narrator models via the existing `gitsema models` system. +- HTTP parity: `POST /api/v1/narrate`, `POST /api/v1/explain`. +- MCP parity: `narrate_repo`, `explain_issue_or_error`. +- Safe-by-default: no remote calls unless configured; redaction before every LLM call. +- Auditable output with commit hash citations. + +**Schema changes (v22):** +- `embed_config.kind TEXT DEFAULT 'embedding'` — distinguishes embedding vs narrator configs. +- `embed_config.params_json TEXT` — narrator-specific params (httpUrl, apiKey, maxTokens, temperature). +- `settings` table — key-value table for persistent settings (`active_narrator_model_config_id`). +- `CURRENT_SCHEMA_VERSION` bumped to **22**. + +**New packages:** +- `@jsilvanus/chattydeer@^0.2.0` — added (LLM explainer/narrator, split from embedeer). +- `@jsilvanus/embedeer@^1.3.2` — updated to latest. + +**New modules:** +- `src/core/narrator/types.ts` — NarratorProvider interface, NarratorModelConfig, etc. +- `src/core/narrator/redact.ts` — secret-pattern redaction (10 patterns). +- `src/core/narrator/audit.ts` — structured audit logging per narration call. +- `src/core/narrator/chattydeerProvider.ts` — ChattydeerNarratorProvider adapter. +- `src/core/narrator/resolveNarrator.ts` — DB-backed config CRUD + active narrator resolution. +- `src/core/narrator/narrator.ts` — git log parsing, event classification, map-reduce summarisation. +- `src/core/narrator/index.ts` — barrel exports. 
+ +**New CLI commands:** +- `gitsema narrate [--since] [--until] [--range] [--focus] [--format] [--max-commits] [--narrator-model-id] [--model]` +- `gitsema explain [--since] [--until] [--log] [--format] [--narrator-model-id] [--model]` +- `gitsema models narrator-list [--json]` +- `gitsema models narrator-add --http-url [--key] [--max-tokens] [--temperature] [--activate]` +- `gitsema models narrator-activate ` +- `gitsema models narrator-remove ` + +**New HTTP routes (under /api/v1/):** +- `POST /narrate` +- `POST /explain` + +**New MCP tools:** +- `narrate_repo` +- `explain_issue_or_error` + +**Tests:** +- `tests/narratorRedact.test.ts` — 19 tests (redaction patterns). +- `tests/narratorConfig.test.ts` — 15 tests (DB-backed config CRUD, active selection). +- `tests/narratorSmoke.test.ts` — 9 tests (CLI handler shape invariants). +- All 787 tests pass. + +**Documentation:** +- `docs/plan_LLM.md` — full implementation plan (goals, API surface, pipeline, security, tests, schema). +- `CLAUDE.md` schema overview updated to v22 + migration v21→v22 entry added. diff --git a/docs/plan_LLM.md b/docs/plan_LLM.md new file mode 100644 index 0000000..ab87b3f --- /dev/null +++ b/docs/plan_LLM.md @@ -0,0 +1,232 @@ +# docs/plan_LLM.md — LLM Narrator/Explainer Integration + +## 0) Status + +**Implemented** (phase 91+ in `docs/PLAN.md`). +Depends on: `@jsilvanus/chattydeer` ^0.2.0, `@jsilvanus/embedeer` ^1.3.2. + +--- + +## 1) Goals + +- Produce a clear, human-readable narrative of "what changed" over a commit range. +- Produce a bug/error timeline: when it appeared, commits around it, likely fixes. +- Integrate with the existing `gitsema models` system using DB-backed config (`embed_config` table, `kind='narrator'`). +- Safe-by-default: no remote LLM calls unless explicitly configured. +- Auditable output: every narrative includes commit hash citations. +- Scalable: stream `git log`, batch/map-reduce summarisation — no OOM on large repos. 
+ +## 2) Non-goals + +- Not a full GitHub Issues/PR ingestion pipeline (optional future enrichment). +- Not an always-on background daemon. +- Not a deep semantic code audit (that's `gitsema search` + `gitsema security-scan`). +- Not a replacement for the vector/BM25 search; purely a narrative layer. + +--- + +## 3) User-facing API + +### CLI + +```bash +# Generate a development history narrative +gitsema narrate [--since ] [--until ] [--range ] + [--focus bugs|features|ops|security|deps|performance|all] + [--format md|text|json] + [--max-commits ] + [--narrator-model-id | --model ] + +# Explain a bug or error topic +gitsema explain + [--since ] [--until ] + [--log ] + [--format md|text|json] + [--narrator-model-id | --model ] +``` + +### Narrator model management (via `gitsema models`) + +```bash +# Add a narrator model config (stored in DB, kind='narrator') +gitsema models narrator-add gpt4o --http-url https://api.openai.com --key sk-... [--activate] + +# List narrator configs +gitsema models narrator-list [--json] + +# Set the active narrator +gitsema models narrator-activate gpt4o + +# Remove a narrator config +gitsema models narrator-remove gpt4o +``` + +### HTTP (parity) + +``` +POST /api/v1/narrate — { since?, until?, range?, focus?, format?, maxCommits?, narratorModelId?, model? } +POST /api/v1/explain — { topic, since?, until?, format?, narratorModelId?, model? 
} +``` + +Response shape: `{ prose, commitCount, citations[], redactedFields[], llmEnabled, format }` + +### MCP (parity) + +``` +narrate_repo — same args as POST /narrate +explain_issue_or_error — same args as POST /explain +``` + +--- + +## 4) Inputs / Data sources + +**Required (local, offline)** +- `git log` streamed via `execSync` (commit hash, date, author, subject, body) +- Default window: last 500 commits (configurable via `--max-commits`) +- Hard cap: 5 000 commits max to prevent OOM + +**Optional (user-provided)** +- Error log / stack trace file (`--log `, capped at 8 KB) +- Future: git blame for specific files (bounded) + +--- + +## 5) Core pipeline (scalable) + +``` +git log (streaming, up to N commits) + ↓ +Event extraction + classification (bugfix, feature, security, deps, perf, ops) + ↓ +Focus filtering (--focus flag) + ↓ +Map-reduce summarisation (batch_size=100 commits → LLM → merge) + ↓ +Final narrative prompt (batch summaries + top 10 notable commits) + ↓ +LLM → prose + citations +``` + +Explain mode: +``` +git log (streaming) → keyword match against topic + ↓ +Relevant commit timeline (up to 30 commits) + ↓ +Optional: error log excerpt (capped 2 KB) + ↓ +LLM → incident timeline with citations +``` + +--- + +## 6) DB-backed narrator model config + +Narrator configs share the `embed_config` table with embedding configs, distinguished by `kind = 'narrator'` (added in schema v22). The `params_json` column stores narrator-specific params as JSON: + +```json +{ + "httpUrl": "https://api.openai.com", + "apiKey": "sk-...", + "maxTokens": 512, + "temperature": 0.3 +} +``` + +The active narrator selection is stored in the `settings` table under the key `active_narrator_model_config_id`. + +Resolution order for narrator provider: +1. `--narrator-model-id ` CLI option (explicit `embed_config.id`) +2. `--model ` CLI option (lookup by name in `embed_config`) +3. Active narrator config from `settings` table +4. 
Disabled (safe-by-default, no network calls) + +--- + +## 7) Security / privacy + +- **Redaction pass** before every LLM call (see `src/core/narrator/redact.ts`): + - AWS access/secret keys + - GitHub PATs (`ghp_`, `github_pat_`) + - OpenAI `sk-` keys + - Google `AIza` keys + - JWTs (three-segment base64url) + - PEM private key blocks + - Generic env-style `SECRET=`, `TOKEN=` assignments + - Private IP addresses, email addresses +- `--include-diff` default = false (no code content sent to LLM) +- Payload hard cap: git log capped at `maxCommits`, log file capped at 8 KB +- **Safe-by-default**: no network calls when no narrator model is configured + +### Audit logging + +Every narration call produces a structured audit log entry (`[llm_audit]`) in `.gitsema/gitsema.log` recording: operation, service, model, duration, success, and redacted field names. The entry never contains the actual prompt or response text. + +--- + +## 8) LLM backend (`@jsilvanus/chattydeer`) + +The `ChattydeerNarratorProvider` adapter: +1. Creates a `LLMAdapter` with a custom `generateFn` that calls the configured OpenAI-compatible HTTP endpoint (no local HuggingFace model download required for remote providers). +2. Wraps in `Explainer.explain()` for structured, citation-validated output. +3. Falls back to a disabled placeholder when no `httpUrl` is configured. + +The chattydeer `Explainer` validates that: +- Output is valid JSON with `{ explanation, labels, references, meta }` shape. +- All `references[].id` values map back to provided evidence IDs. +- Repairs malformed JSON with a single retry prompt. 
+ +--- + +## 9) Tests + +| Test file | Coverage | +|---|---| +| `tests/narratorRedact.test.ts` | Redaction: pattern matching, email, JWT, private IP, env-secret | +| `tests/narratorConfig.test.ts` | DB-backed config: save/list/activate/delete, active selection | +| `tests/narratorSmoke.test.ts` | CLI narrate/explain handlers with mock provider (disabled + enabled) | + +--- + +## 10) Acceptance criteria + +- [x] `gitsema narrate` returns coherent narrative with commit citations (or safe placeholder when unconfigured) +- [x] `gitsema explain "error text"` returns a timeline with cited commits +- [x] Handles large repos without OOM (stream + batching, configurable cap) +- [x] Redacts secrets-like strings before remote calls +- [x] Safe-by-default: no network calls unless narrator model is configured +- [x] DB-backed config: add/list/activate/remove via `gitsema models narrator-*` +- [x] HTTP parity: `POST /api/v1/narrate`, `POST /api/v1/explain` +- [x] MCP parity: `narrate_repo`, `explain_issue_or_error` +- [x] Uses `@jsilvanus/chattydeer` (not legacy embedeer explainer path) +- [x] Audit log entry written for every narration call +- [x] CI green + +--- + +## 11) Schema changes (v22) + +```sql +-- Added to embed_config: +ALTER TABLE embed_config ADD COLUMN kind TEXT DEFAULT 'embedding'; +ALTER TABLE embed_config ADD COLUMN params_json TEXT; + +-- New table: +CREATE TABLE settings ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL +); +``` + +Known `settings` keys: +- `active_narrator_model_config_id` — INTEGER embed_config.id of the active narrator config + +--- + +## 12) Package changes + +| Package | Before | After | +|---|---|---| +| `@jsilvanus/embedeer` | (older version) | `^1.3.2` | +| `@jsilvanus/chattydeer` | not installed | `^0.2.0` (new) | diff --git a/package-lock.json b/package-lock.json index 58244c2..2b670c8 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,7 +10,8 @@ "license": "ISC", "dependencies": { "@asteasolutions/zod-to-openapi": "8.5.0", 
- "@jsilvanus/embedeer": "^1.0.2", + "@jsilvanus/chattydeer": "^0.2.0", + "@jsilvanus/embedeer": "^1.3.2", "@modelcontextprotocol/sdk": "^1.29.0", "better-sqlite3": "^12.8.0", "commander": "^12.1.0", @@ -1508,10 +1509,38 @@ "dev": true, "license": "MIT" }, + "node_modules/@jsilvanus/chattydeer": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/@jsilvanus/chattydeer/-/chattydeer-0.2.0.tgz", + "integrity": "sha512-Tl55/conB6xllSyf9y7zG3zXxRZp7AamnnJv1wmU/Mk2WlbsPKTh8J8LMaymIFu3tXmoBtuS2Q/3aTC0jsqPjQ==", + "license": "MIT", + "dependencies": { + "@huggingface/transformers": "^4.0.1", + "@jsilvanus/embedeer": "1.3.1" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@jsilvanus/chattydeer/node_modules/@jsilvanus/embedeer": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@jsilvanus/embedeer/-/embedeer-1.3.1.tgz", + "integrity": "sha512-5jOImrV30+S0p5dUjiGHOXP6tZq3iWhaOsfv1awczJJlouWtzR33buUVTa7oTNuL/juUzvA+JFqLNlExj3Un5g==", + "license": "MIT", + "dependencies": { + "@huggingface/transformers": "^4.0.1" + }, + "bin": { + "embedeer": "src/cli.js" + }, + "engines": { + "node": ">=18" + } + }, "node_modules/@jsilvanus/embedeer": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@jsilvanus/embedeer/-/embedeer-1.2.1.tgz", - "integrity": "sha512-BGcf5+LHsXMDSbhavG36k/EekSnCIpjbY0kQBHdn+rYxHUC6WKbZ6FbZ+ThZmc0Gz8ime1I/nOFmhfKWgERFnw==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/@jsilvanus/embedeer/-/embedeer-1.3.2.tgz", + "integrity": "sha512-7MGHSzxkLPGtwEylAgKok2DEsxvSZC6PrBLCoPfsDEAi4tpGUoSQNnfTfzqSjvStuiNLrIsvsCvfZpoEllJ83Q==", "license": "MIT", "dependencies": { "@huggingface/transformers": "^4.0.1" diff --git a/package.json b/package.json index 4b2606d..f4f9ea3 100644 --- a/package.json +++ b/package.json @@ -31,7 +31,8 @@ "homepage": "https://github.com/jsilvanus/gitsema#readme", "dependencies": { "@asteasolutions/zod-to-openapi": "8.5.0", - "@jsilvanus/embedeer": "^1.0.2", + 
"@jsilvanus/chattydeer": "^0.2.0", + "@jsilvanus/embedeer": "^1.3.2", "@modelcontextprotocol/sdk": "^1.29.0", "better-sqlite3": "^12.8.0", "commander": "^12.1.0", diff --git a/src/cli/commands/models.ts b/src/cli/commands/models.ts index 854f7fe..3ffad11 100644 --- a/src/cli/commands/models.ts +++ b/src/cli/commands/models.ts @@ -560,3 +560,157 @@ export async function modelsRemoveCommand( } } } + +// --------------------------------------------------------------------------- +// models narrator-list — list narrator model configs (kind='narrator' in embed_config) +// --------------------------------------------------------------------------- + +export async function modelsNarratorListCommand(opts: { json?: boolean } = {}): Promise { + try { + const { getRawDb } = await import('../../core/db/sqlite.js') + const rawDb = getRawDb() + const { listNarratorConfigs, getActiveNarratorConfigId } = await import('../../core/narrator/resolveNarrator.js') + const configs = listNarratorConfigs(rawDb) + const activeId = getActiveNarratorConfigId(rawDb) + + if (opts.json) { + console.log(JSON.stringify(configs.map((c) => ({ ...c, active: c.id === activeId })), null, 2)) + return + } + + if (configs.length === 0) { + console.log('No narrator model configs found.') + console.log('') + console.log('Add one: gitsema models narrator-add --http-url [--key ]') + return + } + + console.log(`${'ID'.padEnd(4)} ${'Name'.padEnd(30)} ${'Provider'.padEnd(12)} Active HTTP URL`) + console.log('-'.repeat(80)) + for (const c of configs) { + const active = c.id === activeId ? '✓' : ' ' + const url = c.params.httpUrl || '(not set)' + console.log(`${String(c.id).padEnd(4)} ${c.name.padEnd(30)} ${c.provider.padEnd(12)} ${active.padEnd(7)} ${url}`) + } + console.log('') + console.log(`${configs.length} narrator model(s). Active ID: ${activeId ?? '(none)'}`) + } catch (err) { + console.error(`Error: ${err instanceof Error ? 
err.message : String(err)}`) + process.exit(1) + } +} + +// --------------------------------------------------------------------------- +// models narrator-add — add / update a narrator model config +// --------------------------------------------------------------------------- + +export interface ModelsNarratorAddOptions { + httpUrl: string + key?: string + maxTokens?: string + temperature?: string + activate?: boolean +} + +export async function modelsNarratorAddCommand( + name: string, + opts: ModelsNarratorAddOptions, +): Promise { + if (!name || !name.trim()) { + console.error('Error: model name is required') + process.exit(1) + } + if (!opts.httpUrl) { + console.error('Error: --http-url is required for narrator models') + process.exit(1) + } + + try { + const { getRawDb } = await import('../../core/db/sqlite.js') + const rawDb = getRawDb() + const { saveNarratorConfig, setActiveNarratorConfig } = await import('../../core/narrator/resolveNarrator.js') + + const params = { + httpUrl: opts.httpUrl, + ...(opts.key ? { apiKey: opts.key } : {}), + ...(opts.maxTokens ? { maxTokens: parseInt(opts.maxTokens, 10) } : {}), + ...(opts.temperature ? { temperature: parseFloat(opts.temperature) } : {}), + } + + const id = saveNarratorConfig(rawDb, name.trim(), 'chattydeer', params) + console.log(`Saved narrator model config '${name}' (id=${id}).`) + console.log(` Provider: chattydeer`) + console.log(` HTTP URL: ${opts.httpUrl}`) + if (opts.key) console.log(` API key: (set)`) + + if (opts.activate) { + setActiveNarratorConfig(rawDb, id) + console.log(` Activated as default narrator (id=${id}).`) + } else { + console.log(` To activate: gitsema models narrator-activate ${name}`) + } + } catch (err) { + console.error(`Error: ${err instanceof Error ? 
err.message : String(err)}`) + process.exit(1) + } +} + +// --------------------------------------------------------------------------- +// models narrator-activate — set the active narrator model +// --------------------------------------------------------------------------- + +export async function modelsNarratorActivateCommand(name: string): Promise { + if (!name || !name.trim()) { + console.error('Error: model name is required') + process.exit(1) + } + try { + const { getRawDb } = await import('../../core/db/sqlite.js') + const rawDb = getRawDb() + const { getNarratorConfigByName, setActiveNarratorConfig } = await import('../../core/narrator/resolveNarrator.js') + const config = getNarratorConfigByName(rawDb, name.trim()) + if (!config) { + console.error(`Error: no narrator model config found for '${name}'.`) + console.error(`Run: gitsema models narrator-list`) + process.exit(1) + } + setActiveNarratorConfig(rawDb, config.id) + console.log(`Narrator model '${name}' (id=${config.id}) is now active.`) + } catch (err) { + console.error(`Error: ${err instanceof Error ? 
err.message : String(err)}`) + process.exit(1) + } +} + +// --------------------------------------------------------------------------- +// models narrator-remove — remove a narrator model config +// --------------------------------------------------------------------------- + +export async function modelsNarratorRemoveCommand(name: string): Promise { + if (!name || !name.trim()) { + console.error('Error: model name is required') + process.exit(1) + } + try { + const { getRawDb } = await import('../../core/db/sqlite.js') + const rawDb = getRawDb() + const { deleteNarratorConfig, getActiveNarratorConfig, clearActiveNarratorConfig } = await import('../../core/narrator/resolveNarrator.js') + + // Check if it's the active narrator — clear if so + const active = getActiveNarratorConfig(rawDb) + const removed = deleteNarratorConfig(rawDb, name.trim()) + if (removed) { + if (active?.name === name.trim()) { + clearActiveNarratorConfig(rawDb) + console.log(`Removed narrator model config '${name}' (was active — selection cleared).`) + } else { + console.log(`Removed narrator model config '${name}'.`) + } + } else { + console.log(`No narrator model config found for '${name}'.`) + } + } catch (err) { + console.error(`Error: ${err instanceof Error ? err.message : String(err)}`) + process.exit(1) + } +} diff --git a/src/cli/commands/narrate.ts b/src/cli/commands/narrate.ts new file mode 100644 index 0000000..26df2a1 --- /dev/null +++ b/src/cli/commands/narrate.ts @@ -0,0 +1,182 @@ +/** + * `gitsema narrate` and `gitsema explain` CLI command handlers. + * + * Both commands resolve the active narrator model config from the DB + * (or via CLI override) and generate LLM-powered prose. + * + * Safe-by-default: when no narrator model is configured the commands print + * a clear message without making any network calls. 
+ */ + +import type { Command } from 'commander' +import { resolveNarratorProvider, runNarrate, runExplain } from '../../core/narrator/index.js' +import type { NarrateFocus, NarrateFormat, NarrationResult } from '../../core/narrator/types.js' + +// --------------------------------------------------------------------------- +// Output formatting +// --------------------------------------------------------------------------- + +function formatResult(result: NarrationResult): string { + const { prose, commitCount, citations, llmEnabled, format } = result + + if (!llmEnabled) { + return prose + } + + const lines: string[] = [] + + if (format === 'json') { + return JSON.stringify( + { + prose, + commitCount, + citations, + redactedFields: result.redactedFields, + llmEnabled, + }, + null, + 2, + ) + } + + if (format === 'md') { + lines.push(`## Narrative`) + lines.push('') + lines.push(prose) + lines.push('') + if (citations.length > 0) { + lines.push(`### Cited commits (${citations.length})`) + lines.push(citations.slice(0, 20).map((h) => `- \`${h.slice(0, 12)}\``).join('\n')) + } + lines.push('') + lines.push(`_${commitCount} commit(s) analysed_`) + if (result.redactedFields.length > 0) { + lines.push(`_Redacted patterns: ${result.redactedFields.join(', ')}_`) + } + } else { + // text + lines.push(prose) + lines.push('') + if (citations.length > 0) { + lines.push(`Cited commits: ${citations.slice(0, 20).map((h) => h.slice(0, 12)).join(', ')}`) + } + lines.push(`${commitCount} commit(s) analysed`) + if (result.redactedFields.length > 0) { + lines.push(`Redacted patterns: ${result.redactedFields.join(', ')}`) + } + } + + return lines.join('\n') +} + +// --------------------------------------------------------------------------- +// narrate command +// --------------------------------------------------------------------------- + +export async function narrateCommand( + opts: { + since?: string + until?: string + range?: string + focus?: string + format?: string + 
maxCommits?: string + narratorModelId?: string + model?: string + }, +): Promise { + const narratorModelId = opts.narratorModelId !== undefined ? parseInt(opts.narratorModelId, 10) : undefined + const provider = resolveNarratorProvider({ + narratorModelId, + modelName: opts.model, + }) + + let result: NarrationResult + try { + result = await runNarrate(provider, { + since: opts.since, + until: opts.until, + range: opts.range, + focus: (opts.focus as NarrateFocus) ?? 'all', + format: (opts.format as NarrateFormat) ?? 'md', + maxCommits: opts.maxCommits ? parseInt(opts.maxCommits, 10) : undefined, + }) + } finally { + await provider.destroy() + } + + console.log(formatResult(result)) +} + +// --------------------------------------------------------------------------- +// explain command +// --------------------------------------------------------------------------- + +export async function explainCommand( + topic: string, + opts: { + since?: string + until?: string + log?: string + files?: string + format?: string + narratorModelId?: string + model?: string + }, +): Promise { + const narratorModelId = opts.narratorModelId !== undefined ? parseInt(opts.narratorModelId, 10) : undefined + const provider = resolveNarratorProvider({ + narratorModelId, + modelName: opts.model, + }) + + let result: NarrationResult + try { + result = await runExplain(provider, topic, { + since: opts.since, + until: opts.until, + log: opts.log, + files: opts.files, + format: (opts.format as NarrateFormat) ?? 
'md', + }) + } finally { + await provider.destroy() + } + + console.log(formatResult(result)) +} + +// --------------------------------------------------------------------------- +// Registration helper +// --------------------------------------------------------------------------- + +export function registerNarratorCommands(program: Command): void { + program + .command('narrate') + .description('Generate a human-readable narrative of repository development history using an LLM narrator model.') + .option('--since ', 'only include commits after this ref or date') + .option('--until ', 'only include commits before this ref or date') + .option('--range ', 'git revision range (e.g. v1.0..HEAD)') + .option( + '--focus ', + 'filter commits by area: bugs, features, ops, security, deps, performance, all (default: all)', + 'all', + ) + .option('--format ', 'output format: md, text, json (default: md)', 'md') + .option('--max-commits ', 'maximum commits to analyse (default: 500)') + .option('--narrator-model-id ', 'embed_config.id of the narrator model to use (overrides active selection)') + .option('--model ', 'narrator model name to use (overrides active selection)') + .action(narrateCommand) + + program + .command('explain ') + .description('Explain a bug, error, or topic by tracing it through git history using an LLM narrator model.') + .option('--since ', 'only include commits after this ref or date') + .option('--until ', 'only include commits before this ref or date') + .option('--log ', 'path to an error log or stack trace file to include as context') + .option('--files ', 'restrict search to files matching this glob') + .option('--format ', 'output format: md, text, json (default: md)', 'md') + .option('--narrator-model-id ', 'embed_config.id of the narrator model to use (overrides active selection)') + .option('--model ', 'narrator model name to use (overrides active selection)') + .action(explainCommand) +} diff --git a/src/cli/register/all.ts 
b/src/cli/register/all.ts index 3d12f6d..e146385 100644 --- a/src/cli/register/all.ts +++ b/src/cli/register/all.ts @@ -49,6 +49,7 @@ import { quickstartCommand } from '../commands/quickstart.js' import { regressionGateCommand } from '../commands/regressionGate.js' import { crossRepoSimilarityCommand } from '../commands/crossRepoSimilarity.js' import { codeReviewCommand } from '../commands/codeReview.js' +import { registerNarratorCommands } from '../commands/narrate.js' export function registerAll(program: Command) { // Preserve per-domain registration modules @@ -537,6 +538,9 @@ export function registerAll(program: Command) { .action(async (opts: { base?: string; head?: string; diffFile?: string; top?: string; threshold?: string; format?: string }) => { await codeReviewCommand(opts) }) + + // Narrator commands: narrate + explain (LLM-powered, DB-backed model config) + registerNarratorCommands(program) } export default registerAll diff --git a/src/cli/register/setup.ts b/src/cli/register/setup.ts index cd7a940..e9867ab 100644 --- a/src/cli/register/setup.ts +++ b/src/cli/register/setup.ts @@ -12,6 +12,10 @@ import { modelsAddCommand, modelsRemoveCommand, modelsUpdateCommand, + modelsNarratorListCommand, + modelsNarratorAddCommand, + modelsNarratorActivateCommand, + modelsNarratorRemoveCommand, } from '../commands/models.js' import { collectOut } from '../../utils/outputSink.js' @@ -202,4 +206,45 @@ Examples: ) => { await modelsRemoveCommand(name, opts) }) + + // --------------------------------------------------------------------------- + // Narrator model management subcommands (DB-backed, kind='narrator') + // --------------------------------------------------------------------------- + + modelsSub + .command('narrator-list') + .description('List narrator model configs (stored in DB with kind=narrator)') + .option('--json', 'output as JSON') + .action(async (opts: { json?: boolean }) => { + await modelsNarratorListCommand(opts) + }) + + modelsSub + 
.command('narrator-add ') + .description('Add or update a narrator model config (stored in DB, backed by chattydeer)') + .option('--http-url ', 'OpenAI-compatible base URL for chat completions (required)') + .option('--key ', 'API key / Bearer token') + .option('--max-tokens ', 'max tokens per narration call (default: 512)') + .option('--temperature ', 'temperature (default: 0.3)') + .option('--activate', 'set this as the active narrator model immediately') + .action(async ( + name: string, + opts: { httpUrl?: string; key?: string; maxTokens?: string; temperature?: string; activate?: boolean }, + ) => { + await modelsNarratorAddCommand(name, { httpUrl: opts.httpUrl ?? '', key: opts.key, maxTokens: opts.maxTokens, temperature: opts.temperature, activate: opts.activate }) + }) + + modelsSub + .command('narrator-activate ') + .description('Set a narrator model as the active default (resolved by gitsema narrate / gitsema explain)') + .action(async (name: string) => { + await modelsNarratorActivateCommand(name) + }) + + modelsSub + .command('narrator-remove ') + .description('Remove a narrator model config from the DB') + .action(async (name: string) => { + await modelsNarratorRemoveCommand(name) + }) } diff --git a/src/core/db/schema.ts b/src/core/db/schema.ts index aa6d11c..9c89fcf 100644 --- a/src/core/db/schema.ts +++ b/src/core/db/schema.ts @@ -272,6 +272,28 @@ export const embedConfig = sqliteTable('embed_config', { createdAt: integer('created_at').notNull(), /** Timestamp of the last indexing run that used this config (updated by indexStartCommand). */ lastUsedAt: integer('last_used_at'), + /** + * Config kind: 'embedding' (default) or 'narrator'. + * Narrator configs store chat-completion provider params in params_json. + * Added in schema v22. + */ + kind: text('kind').default('embedding'), + /** + * JSON-encoded extra parameters (narrator-specific: httpUrl, apiKey, maxTokens, etc.). + * Added in schema v22. 
+ */ + paramsJson: text('params_json'), +}) + +/** + * Key-value settings table for active config selections and other persistent settings. + * Known keys: + * active_narrator_model_config_id — INTEGER id of the active narrator embed_config row + * Added in schema v22. + */ +export const settings = sqliteTable('settings', { + key: text('key').primaryKey(), + value: text('value').notNull(), }) export const indexingCheckpoints = sqliteTable('indexing_checkpoints', { diff --git a/src/core/db/sqlite.ts b/src/core/db/sqlite.ts index 8b19185..1be2c8d 100644 --- a/src/core/db/sqlite.ts +++ b/src/core/db/sqlite.ts @@ -49,8 +49,9 @@ export interface DbSession { * 19 — Added repo_tokens table (Phase 75 per-repo access control) * 20 — Enforce uniqueness of (blob_hash, path) in paths table (review6 §11.6) * 21 — Hash repo tokens at rest: token_hash + token_prefix replace plaintext token (review7 §4.1) + * 22 — Added kind + params_json columns to embed_config; added settings table (narrator model config) */ -export const CURRENT_SCHEMA_VERSION = 21 +export const CURRENT_SCHEMA_VERSION = 22 /** * Applies pending schema migrations and records the resulting version in the @@ -497,6 +498,25 @@ function applyMigrations(sqlite: InstanceType): void { version = 21 sqlite.prepare(`UPDATE meta SET value = ? 
WHERE key = 'schema_version'`).run('21') } + + // v21 → v22: add kind + params_json columns to embed_config; add settings table (narrator model config) + if (version < 22) { + const embedCols = sqlite.prepare('PRAGMA table_info(embed_config)').all() as Array<{ name: string }> + if (!embedCols.some((c) => c.name === 'kind')) { + sqlite.exec(`ALTER TABLE embed_config ADD COLUMN kind TEXT DEFAULT 'embedding'`) + } + if (!embedCols.some((c) => c.name === 'params_json')) { + sqlite.exec(`ALTER TABLE embed_config ADD COLUMN params_json TEXT`) + } + sqlite.exec(` + CREATE TABLE IF NOT EXISTS settings ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ) + `) + version = 22 + sqlite.prepare(`UPDATE meta SET value = ? WHERE key = 'schema_version'`).run('22') + } } @@ -671,7 +691,7 @@ function initTables(sqlite: InstanceType): void { UNIQUE (module_path, model) ); - -- Embedding provenance (Phase 35 / v13) + -- Embedding provenance (Phase 35 / v13); kind + params_json added in v22 CREATE TABLE IF NOT EXISTS embed_config ( id INTEGER PRIMARY KEY AUTOINCREMENT, config_hash TEXT NOT NULL UNIQUE, @@ -682,7 +702,10 @@ function initTables(sqlite: InstanceType): void { chunker TEXT NOT NULL, window_size INTEGER, overlap INTEGER, - created_at INTEGER NOT NULL + created_at INTEGER NOT NULL, + last_used_at INTEGER, + kind TEXT DEFAULT 'embedding', + params_json TEXT ); -- Incremental-indexing resume markers (Phase 35 / v13) @@ -734,6 +757,14 @@ function initTables(sqlite: InstanceType): void { projected_at INTEGER NOT NULL, UNIQUE (blob_hash, model) ); + + -- Narrator model config and active settings (schema v22) + -- kind column on embed_config distinguishes 'embedding' from 'narrator' configs + -- params_json stores narrator-specific params (httpUrl, apiKey, maxTokens, etc.) 
+ CREATE TABLE IF NOT EXISTS settings ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ); `) if (isFresh) { diff --git a/src/core/narrator/audit.ts b/src/core/narrator/audit.ts new file mode 100644 index 0000000..91b95ff --- /dev/null +++ b/src/core/narrator/audit.ts @@ -0,0 +1,86 @@ +/** + * Narrator audit logging. + * + * Every narration call should be accompanied by a structured audit log entry + * so that operators can trace when, why, and to what model content was sent. + * + * IMPORTANT: Audit entries must NOT contain the raw prompt or response text. + * They record only metadata: timing, model, operation, redacted-field names. + */ + +import { logger } from '../../utils/logger.js' + +// --------------------------------------------------------------------------- +// Audit entry type +// --------------------------------------------------------------------------- + +export interface NarratorAuditEntry { + timestamp: number + operation: 'narrate' | 'explain' + service: string + modelHint: string + durationMs: number + tokensUsed: number + redactedFields: string[] + success: boolean + errorMessage?: string +} + +// --------------------------------------------------------------------------- +// Public API +// --------------------------------------------------------------------------- + +/** + * Record a narrator audit event to the structured logger. + * The entry is logged at `info` level under the `llm_audit` key so it can + * be filtered by operators without exposing user data. + */ +export function recordAudit(entry: NarratorAuditEntry): void { + logger.info(`[llm_audit] op=${entry.operation} service=${entry.service} model=${entry.modelHint} durationMs=${entry.durationMs} success=${entry.success} redacted=${entry.redactedFields.join(',') || 'none'}${entry.errorMessage ? ` error=${entry.errorMessage}` : ''}`) +} + +/** + * Helper: run `fn` and automatically record a timing audit entry. 
+ * + * @param operation - 'narrate' | 'explain' + * @param service - provider name string (e.g. 'chattydeer') + * @param modelHint - model name / hint + * @param redactedFields - list of redaction patterns that fired + * @param fn - async function to wrap + */ +export async function withAudit( + operation: 'narrate' | 'explain', + service: string, + modelHint: string, + redactedFields: string[], + fn: () => Promise, +): Promise { + const start = Date.now() + try { + const result = await fn() + recordAudit({ + timestamp: Date.now(), + operation, + service, + modelHint, + durationMs: Date.now() - start, + tokensUsed: 0, + redactedFields, + success: true, + }) + return result + } catch (err) { + recordAudit({ + timestamp: Date.now(), + operation, + service, + modelHint, + durationMs: Date.now() - start, + tokensUsed: 0, + redactedFields, + success: false, + errorMessage: err instanceof Error ? err.message : String(err), + }) + throw err + } +} diff --git a/src/core/narrator/chattydeerProvider.ts b/src/core/narrator/chattydeerProvider.ts new file mode 100644 index 0000000..35f75a6 --- /dev/null +++ b/src/core/narrator/chattydeerProvider.ts @@ -0,0 +1,211 @@ +/** + * ChattydeerNarratorProvider — NarratorProvider backed by @jsilvanus/chattydeer. + * + * This adapter: + * 1. Guards against remote calls unless explicitly enabled via the model config. + * 2. Applies redaction to all payloads before sending to the LLM. + * 3. Uses the Explainer class from chattydeer for structured output. + * 4. Falls back gracefully when the LLM is unavailable. + * + * Safe-by-default: when `enabled` is false (default when no model config is set), + * `narrate()` returns immediately with a placeholder — no network call is made. 
+ */ + +import type { NarratorProvider, NarrateRequest, NarrateResponse, NarratorModelParams } from './types.js' +import { redact } from './redact.js' +import { withAudit } from './audit.js' +import { logger } from '../../utils/logger.js' + +// --------------------------------------------------------------------------- +// Lazy import of chattydeer to avoid loading HuggingFace transformers at module +// level. The dynamic import runs only when narrate() is actually called. +// --------------------------------------------------------------------------- + +let _explainerModule: { + Explainer: { + create(modelName: string, opts?: Record): Promise<{ + explain(req: { + task: string + domain: string + context: Record + evidence: Array<{ id: number; source: string; excerpt: string }> + maxTokens?: number + }): Promise<{ explanation: string; labels: string[]; references: unknown[]; meta: unknown }> + destroy(): Promise + }> + } +} | null = null + +async function getExplainerClass(): Promise { + if (_explainerModule === null) { + // @ts-ignore — chattydeer is a plain JS ESM package without types + _explainerModule = await import('@jsilvanus/chattydeer') + } + return _explainerModule +} + +// --------------------------------------------------------------------------- +// Placeholder returned in safe-by-default (disabled) mode +// --------------------------------------------------------------------------- + +function disabledResponse(redactedFields: string[]): NarrateResponse { + return { + prose: '[LLM narrator disabled — configure a narrator model via: gitsema models add --kind narrator --http-url ]', + tokensUsed: 0, + redactedFields, + llmEnabled: false, + } +} + +// --------------------------------------------------------------------------- +// ChattydeerNarratorProvider +// --------------------------------------------------------------------------- + +/** + * Options for constructing the provider. 
+ * + * When `params` is undefined the provider operates in safe-by-default + * (disabled) mode — no network calls are made. + */ +export interface ChattydeerProviderOptions { + modelName: string + /** Narrator model params from the DB-backed config. Undefined → disabled. */ + params?: NarratorModelParams +} + +export class ChattydeerNarratorProvider implements NarratorProvider { + readonly modelName: string + private readonly _params: NarratorModelParams | undefined + private readonly _enabled: boolean + + constructor(opts: ChattydeerProviderOptions) { + this.modelName = opts.modelName + this._params = opts.params + // A provider is enabled if params are supplied AND an httpUrl is set. + this._enabled = !!(opts.params?.httpUrl) + } + + async narrate(req: NarrateRequest): Promise { + // Redact before anything else — including logging + const { text: redactedUser, firedPatterns: userFired } = redact(req.userPrompt) + const { text: redactedSystem } = redact(req.systemPrompt) + const allFired = userFired + + if (!this._enabled || !this._params) { + return disabledResponse(allFired) + } + + const params = this._params + const modelName = this.modelName + const maxTokens = req.maxTokens ?? params.maxTokens ?? 512 + + const fn = async (): Promise => { + const mod = await getExplainerClass() + if (!mod) { + throw new Error('chattydeer module failed to load') + } + + // Build a generateFn that calls the configured HTTP endpoint + // so we don't load local HuggingFace models at all. 
+ const generateFn = buildHttpGenerateFn(params) + + const explainer = await mod.Explainer.create(modelName, { + generateFn, + deterministic: true, + }) + + try { + const result = await explainer.explain({ + task: 'narrate', + domain: 'evolution', + context: { model: modelName }, + evidence: [ + { id: 1, source: 'git-history', excerpt: redactedUser }, + { id: 2, source: 'instructions', excerpt: redactedSystem }, + ], + maxTokens, + }) + + const prose = result.explanation === 'INSUFFICIENT_EVIDENCE' + ? '(narrator: insufficient evidence — no meaningful content to summarise)' + : result.explanation + + return { + prose, + tokensUsed: 0, + redactedFields: allFired, + llmEnabled: true, + } + } finally { + await explainer.destroy() + } + } + + try { + return await withAudit('narrate', 'chattydeer', modelName, allFired, fn) + } catch (err) { + const msg = err instanceof Error ? err.message : String(err) + logger.error(`[narrator] chattydeer narrate failed: ${msg}`) + return { + prose: `(narrator error: ${msg})`, + tokensUsed: 0, + redactedFields: allFired, + llmEnabled: true, + } + } + } + + async destroy(): Promise { + // Provider holds no persistent resources — adapter is created per call + } +} + +// --------------------------------------------------------------------------- +// HTTP generate function — calls an OpenAI-compatible chat completions API +// --------------------------------------------------------------------------- + +function buildHttpGenerateFn(params: NarratorModelParams) { + const { httpUrl, apiKey, temperature = 0.3 } = params + + return async (prompt: string, opts: { max_new_tokens?: number } = {}): Promise<{ text: string; raw: null }> => { + const endpoint = new URL('/v1/chat/completions', httpUrl).toString() + const headers: Record = { 'Content-Type': 'application/json' } + if (apiKey) headers['Authorization'] = `Bearer ${apiKey}` + + const body = JSON.stringify({ + model: 'default', + messages: [{ role: 'user', content: prompt }], + max_tokens: 
opts.max_new_tokens ?? 512, + temperature, + }) + + const response = await fetch(endpoint, { method: 'POST', headers, body }) + if (!response.ok) { + const errText = await response.text() + throw new Error(`HTTP ${response.status}: ${errText.slice(0, 200)}`) + } + const data = await response.json() as { + choices?: Array<{ message?: { content?: string } }> + } + const text = data.choices?.[0]?.message?.content ?? '' + return { text, raw: null } + } +} + +// --------------------------------------------------------------------------- +// Factory helpers +// --------------------------------------------------------------------------- + +/** + * Create a disabled-mode provider (safe-by-default, no network calls). + */ +export function createDisabledProvider(name = 'narrator'): ChattydeerNarratorProvider { + return new ChattydeerNarratorProvider({ modelName: name }) +} + +/** + * Create a provider from narrator model params. + */ +export function createChattydeerProvider(name: string, params: NarratorModelParams): ChattydeerNarratorProvider { + return new ChattydeerNarratorProvider({ modelName: name, params }) +} diff --git a/src/core/narrator/index.ts b/src/core/narrator/index.ts new file mode 100644 index 0000000..8b13a0b --- /dev/null +++ b/src/core/narrator/index.ts @@ -0,0 +1,12 @@ +/** + * Barrel exports for the narrator module. 
+ */ + +export type { NarratorProvider, NarrateRequest, NarrateResponse, NarratorModelConfig, NarratorModelParams, CommitEvent, NarrationResult, NarrateCommandOptions, ExplainCommandOptions, NarrateFocus, NarrateFormat } from './types.js' +export { redact, redactAll } from './redact.js' +export type { RedactResult } from './redact.js' +export { recordAudit, withAudit } from './audit.js' +export type { NarratorAuditEntry } from './audit.js' +export { ChattydeerNarratorProvider, createChattydeerProvider, createDisabledProvider } from './chattydeerProvider.js' +export { resolveNarratorProvider, listNarratorConfigs, getNarratorConfigById, getNarratorConfigByName, saveNarratorConfig, deleteNarratorConfig, getActiveNarratorConfig, getActiveNarratorConfigId, setActiveNarratorConfig, clearActiveNarratorConfig, getSetting, setSetting, deleteSetting } from './resolveNarrator.js' +export { runNarrate, runExplain, fetchCommitEvents } from './narrator.js' diff --git a/src/core/narrator/narrator.ts b/src/core/narrator/narrator.ts new file mode 100644 index 0000000..a27b6c4 --- /dev/null +++ b/src/core/narrator/narrator.ts @@ -0,0 +1,298 @@ +/** + * Narrator core — git log parsing, event classification, and LLM summarisation. + * + * All content is redacted before being sent to the LLM provider. + * Output always includes commit hash citations for auditability. 
+ */ + +import { execSync } from 'node:child_process' +import { readFileSync, existsSync } from 'node:fs' +import type { CommitEvent, NarrateCommandOptions, ExplainCommandOptions, NarrationResult } from './types.js' +import { redactAll } from './redact.js' +import type { NarratorProvider } from './types.js' +import { logger } from '../../utils/logger.js' + +// --------------------------------------------------------------------------- +// Git log streaming +// --------------------------------------------------------------------------- + +const MAX_COMMITS_DEFAULT = 500 + +/** + * Stream commits from git log as an array of CommitEvent records. + * Content is NOT redacted here — callers must redact before sending to LLM. + */ +export function fetchCommitEvents(opts: { + since?: string + until?: string + range?: string + maxCommits?: number + cwd?: string +}): CommitEvent[] { + const { since, until, range, maxCommits = MAX_COMMITS_DEFAULT, cwd = process.cwd() } = opts + + // Build git log command + const parts: string[] = [ + 'git', 'log', + `--max-count=${maxCommits}`, + '--format=%H%x1F%ai%x1F%an%x1F%s%x1F%b%x1E', + ] + + if (range) { + parts.push(range) + } else { + if (since) parts.push(`--since="${since}"`) + if (until) parts.push(`--until="${until}"`) + } + + let raw: string + try { + raw = execSync(parts.join(' '), { cwd, maxBuffer: 50 * 1024 * 1024, encoding: 'utf8' }) + } catch (err) { + logger.warn(`[narrator] git log failed: ${err instanceof Error ? err.message : String(err)}`) + return [] + } + + const events: CommitEvent[] = [] + for (const record of raw.split('\x1E')) { + const trimmed = record.trim() + if (!trimmed) continue + const [hash, date, authorName, subject, ...bodyParts] = trimmed.split('\x1F') + if (!hash || !subject) continue + const body = bodyParts.join('\x1F').trim() + events.push({ + hash: hash.trim(), + date: date?.trim() ?? '', + authorName: authorName?.trim() ?? 
'', + subject: subject.trim(), + body, + tags: classifyEvent(subject, body), + }) + } + + return events +} + +// --------------------------------------------------------------------------- +// Event classification +// --------------------------------------------------------------------------- + +const BUGFIX_RE = /\b(fix|bug|error|crash|revert|hotfix|patch|regression|issue)\b/i +const SECURITY_RE = /\b(cve|vuln|sanitize|security|auth|xss|sqli|injection|leak|privilege)\b/i +const DEPS_RE = /\b(bump|upgrade|update|dependency|deps|dependabot|renovate)\b/i +const PERF_RE = /\b(perf|performance|faster|optimize|speed|memory|cpu|latency)\b/i +const OPS_RE = /\b(ci|cd|deploy|release|docker|k8s|kubernetes|helm|infra|pipeline)\b/i +const FEAT_RE = /\b(feat|feature|add|implement|introduce|new|support)\b/i + +function classifyEvent(subject: string, body: string): string[] { + const text = `${subject} ${body}` + const tags: string[] = [] + if (BUGFIX_RE.test(text)) tags.push('bugfix') + if (SECURITY_RE.test(text)) tags.push('security') + if (DEPS_RE.test(text)) tags.push('deps') + if (PERF_RE.test(text)) tags.push('performance') + if (OPS_RE.test(text)) tags.push('ops') + if (FEAT_RE.test(text)) tags.push('feature') + if (tags.length === 0) tags.push('other') + return tags +} + +// --------------------------------------------------------------------------- +// Focus filtering +// --------------------------------------------------------------------------- + +function filterByFocus(events: CommitEvent[], focus: string): CommitEvent[] { + if (focus === 'all') return events + return events.filter((e) => e.tags.includes(focus)) +} + +// --------------------------------------------------------------------------- +// Map-reduce summarisation +// --------------------------------------------------------------------------- + +const BATCH_SIZE = 100 + +function buildBatchSummaryPrompt(events: CommitEvent[], opts: { focus: string; batchIndex: number }): string { + const lines = 
events.map((e) => + `[${e.hash.slice(0, 8)}] ${e.date.slice(0, 10)} ${e.authorName}: ${e.subject}`, + ) + return ( + `You are summarizing git commit history for a software project.\n` + + `Focus: ${opts.focus}. Batch ${opts.batchIndex + 1}.\n` + + `Summarize the following commits in 2-3 sentences. Cite commit hashes in square brackets.\n\n` + + lines.join('\n') + ) +} + +function buildFinalNarrativePrompt(batchSummaries: string[], events: CommitEvent[], focus: string): string { + const topCommits = events.slice(0, 10).map((e) => + `[${e.hash.slice(0, 8)}] ${e.subject}`, + ).join('\n') + + return ( + `You are writing a development history narrative for a software project.\n` + + `Focus: ${focus}. Total commits: ${events.length}.\n\n` + + `Batch summaries:\n${batchSummaries.join('\n\n')}\n\n` + + `Notable commits:\n${topCommits}\n\n` + + `Write a concise narrative (3-5 paragraphs) that:\n` + + `1. States the time range and total commit count.\n` + + `2. Identifies the main themes.\n` + + `3. Highlights notable commits with their hashes.\n` + + `4. Notes any risks or unknowns (labeled as inference).\n` + + `Cite commit hashes in square brackets like [abc123de].` + ) +} + +async function summariseBatch( + events: CommitEvent[], + batchIndex: number, + focus: string, + provider: NarratorProvider, +): Promise { + const userPrompt = buildBatchSummaryPrompt(events, { focus, batchIndex }) + const res = await provider.narrate({ systemPrompt: 'You are a concise code history analyst.', userPrompt, maxTokens: 300 }) + return res.prose +} + +// --------------------------------------------------------------------------- +// Main narrate function +// --------------------------------------------------------------------------- + +export async function runNarrate( + provider: NarratorProvider, + opts: NarrateCommandOptions, +): Promise { + const focus = opts.focus ?? 'all' + const format = opts.format ?? 'md' + + // 1. 
Fetch commits + const allEvents = fetchCommitEvents({ + since: opts.since, + until: opts.until, + range: opts.range, + maxCommits: opts.maxCommits, + }) + + // 2. Filter by focus + const events = filterByFocus(allEvents, focus) + + if (events.length === 0) { + return { + prose: '(No commits matched the specified criteria.)', + commitCount: 0, + citations: [], + redactedFields: [], + llmEnabled: false, + format, + } + } + + // 3. Batch + map-reduce + const batches: CommitEvent[][] = [] + for (let i = 0; i < events.length; i += BATCH_SIZE) { + batches.push(events.slice(i, i + BATCH_SIZE)) + } + + const batchSummaries: string[] = [] + const allRedacted: string[] = [] + + for (let i = 0; i < batches.length; i++) { + const summary = await summariseBatch(batches[i], i, focus, provider) + batchSummaries.push(summary) + } + + // 4. Final narrative + const finalPrompt = buildFinalNarrativePrompt(batchSummaries, events, focus) + const { text: redactedFinal, firedPatterns } = (() => { + const r = redactAll([finalPrompt]) + return { text: r.texts[0], firedPatterns: r.firedPatterns } + })() + for (const p of firedPatterns) { if (!allRedacted.includes(p)) allRedacted.push(p) } + + const finalRes = await provider.narrate({ + systemPrompt: 'You are writing a development history narrative. Be factual, cite commit hashes.', + userPrompt: redactedFinal, + maxTokens: 600, + }) + + const citations = events.slice(0, 20).map((e) => e.hash) + + return { + prose: finalRes.prose, + commitCount: events.length, + citations, + redactedFields: [...allRedacted, ...finalRes.redactedFields], + llmEnabled: finalRes.llmEnabled, + format, + } +} + +// --------------------------------------------------------------------------- +// Explain (error/bug history) +// --------------------------------------------------------------------------- + +export async function runExplain( + provider: NarratorProvider, + topic: string, + opts: ExplainCommandOptions, +): Promise { + const format = opts.format ?? 
'md' + + // 1. Fetch commits + const allEvents = fetchCommitEvents({ + since: opts.since, + until: opts.until, + maxCommits: 500, + }) + + // 2. Find relevant commits (keyword match on subject + body) + const keywords = topic.toLowerCase().split(/\s+/) + const relevant = allEvents.filter((e) => { + const text = `${e.subject} ${e.body}`.toLowerCase() + return keywords.some((kw) => text.includes(kw)) + }) + + // 3. Optional: include user-provided log file + let logContent = '' + if (opts.log && existsSync(opts.log)) { + try { + logContent = readFileSync(opts.log, 'utf8').slice(0, 8000) // cap at 8KB + } catch { + // ignore + } + } + + // 4. Build explain prompt + const commitLines = relevant.slice(0, 30).map((e) => + `[${e.hash.slice(0, 8)}] ${e.date.slice(0, 10)} ${e.subject}`, + ) + const userPrompt = [ + `Topic: "${topic}"`, + `Found ${relevant.length} related commits (showing up to 30).`, + relevant.length > 0 ? `\nCommit timeline:\n${commitLines.join('\n')}` : '\nNo related commits found.', + logContent ? `\nError log excerpt:\n${logContent.slice(0, 2000)}` : '', + `\nPlease provide:\n1. A timeline of when this issue appeared.\n2. Likely introduction commit(s) with hashes.\n3. Any fix attempts with commit hashes.\n4. Current status (resolved / ongoing).\nLabel inferences clearly. Cite commit hashes in square brackets.`, + ].filter(Boolean).join('\n') + + const { text: redactedPrompt, firedPatterns } = redactAll([userPrompt]).texts.reduce( + (acc, t, i) => ({ text: t, firedPatterns: redactAll([userPrompt]).firedPatterns }), + { text: userPrompt, firedPatterns: [] as string[] }, + ) + + const res = await provider.narrate({ + systemPrompt: 'You are a software incident analyst. 
Be factual and cite commit hashes for every claim.', + userPrompt: redactedPrompt, + maxTokens: 512, + }) + + const citations = relevant.slice(0, 20).map((e) => e.hash) + + return { + prose: res.prose, + commitCount: relevant.length, + citations, + redactedFields: [...firedPatterns, ...res.redactedFields], + llmEnabled: res.llmEnabled, + format, + } +} diff --git a/src/core/narrator/redact.ts b/src/core/narrator/redact.ts new file mode 100644 index 0000000..5b851eb --- /dev/null +++ b/src/core/narrator/redact.ts @@ -0,0 +1,130 @@ +/** + * Secret-pattern redaction for narrator/explainer payloads. + * + * All text submitted to a remote LLM is passed through `redact()` first. + * Redaction is conservative — it replaces likely secrets with a fixed + * placeholder rather than removing them, so the structure of the text + * remains readable but credentials cannot leak. + * + * Patterns are intentionally lightweight. For a production system these + * should be replaced with a library like `@secretlint/secretlint-core`. + */ + +// --------------------------------------------------------------------------- +// Pattern definitions +// --------------------------------------------------------------------------- + +interface RedactPattern { + name: string + pattern: RegExp + replacement: string +} + +const REDACT_PATTERNS: RedactPattern[] = [ + // AWS access keys (AKIA...) 
+ { + name: 'aws-access-key', + pattern: /\b(AKIA|ASIA|AROA)[A-Z0-9]{16}\b/g, + replacement: '[REDACTED:aws-access-key]', + }, + // AWS secret keys (40-char base64-like after common assignment patterns) + { + name: 'aws-secret-key', + pattern: /(?:aws_secret|AWS_SECRET)[_A-Za-z]*\s*[=:]\s*["']?([A-Za-z0-9/+]{40})["']?/gi, + replacement: '[REDACTED:aws-secret-key]', + }, + // GitHub PATs (classic ghp_ and fine-grained github_pat_) + { + name: 'github-pat', + pattern: /\b(ghp_[A-Za-z0-9]{36}|github_pat_[A-Za-z0-9_]{82})\b/g, + replacement: '[REDACTED:github-pat]', + }, + // OpenAI / generic sk- API keys + { + name: 'openai-key', + pattern: /\bsk-[A-Za-z0-9_-]{32,}\b/g, + replacement: '[REDACTED:openai-key]', + }, + // Google / Firebase API keys (AIza) + { + name: 'google-api-key', + pattern: /\bAIza[A-Za-z0-9_-]{35}\b/g, + replacement: '[REDACTED:google-api-key]', + }, + // JWT tokens (three base64url segments separated by dots) + { + name: 'jwt', + pattern: /\beyJ[A-Za-z0-9_-]+\.[A-Za-z0-9_-]+\.[A-Za-z0-9_-]+\b/g, + replacement: '[REDACTED:jwt]', + }, + // PEM private key blocks + { + name: 'pem-private-key', + pattern: /-----BEGIN (?:RSA |EC |OPENSSH )?PRIVATE KEY-----[\s\S]*?-----END (?:RSA |EC |OPENSSH )?PRIVATE KEY-----/g, + replacement: '[REDACTED:pem-private-key]', + }, + // Generic env-style key=secret assignments (SECRET=, PASSWORD=, TOKEN=, KEY=) + { + name: 'env-secret', + pattern: /\b(?:SECRET|PASSWORD|PASSWD|TOKEN|API_KEY|APIKEY|AUTH_TOKEN)\s*[=:]\s*["']?[^\s"',;\[]{8,}["']?/gi, + replacement: '[REDACTED:env-secret]', + }, + // Private RFC-1918 addresses are not secrets but we strip them to avoid + // leaking internal network topology — useful in error-log contexts. 
+ { + name: 'private-ip', + pattern: /\b(10\.\d{1,3}\.\d{1,3}\.\d{1,3}|172\.(1[6-9]|2\d|3[01])\.\d{1,3}\.\d{1,3}|192\.168\.\d{1,3}\.\d{1,3})\b/g, + replacement: '[REDACTED:private-ip]', + }, + // Email addresses (can reveal internal employee info) + { + name: 'email', + pattern: /\b[A-Za-z0-9._%+\-]+@[A-Za-z0-9.\-]+\.[A-Za-z]{2,}\b/g, + replacement: '[REDACTED:email]', + }, +] + +// --------------------------------------------------------------------------- +// Public API +// --------------------------------------------------------------------------- + +export interface RedactResult { + text: string + /** Names of patterns that fired (deduplicated). */ + firedPatterns: string[] +} + +/** + * Apply all redaction patterns to `input`. + * + * Returns the cleaned text and the list of pattern names that matched. + * The function is pure — it does not modify any global state. + */ +export function redact(input: string): RedactResult { + const firedSet = new Set() + let text = input + for (const { name, pattern, replacement } of REDACT_PATTERNS) { + // Reset lastIndex for global patterns (safety guard) + pattern.lastIndex = 0 + const replaced = text.replace(pattern, () => { + firedSet.add(name) + return replacement + }) + text = replaced + } + return { text, firedPatterns: Array.from(firedSet) } +} + +/** + * Redact an array of text strings and collect all fired patterns. + * Returns the cleaned strings and combined fired pattern list. 
+ */ +export function redactAll(inputs: string[]): { texts: string[]; firedPatterns: string[] } { + const allFired = new Set() + const texts = inputs.map((input) => { + const { text, firedPatterns } = redact(input) + for (const p of firedPatterns) allFired.add(p) + return text + }) + return { texts, firedPatterns: Array.from(allFired) } +} diff --git a/src/core/narrator/resolveNarrator.ts b/src/core/narrator/resolveNarrator.ts new file mode 100644 index 0000000..d392ed9 --- /dev/null +++ b/src/core/narrator/resolveNarrator.ts @@ -0,0 +1,205 @@ +/** + * Narrator model config — DB-backed storage and retrieval. + * + * Narrator configs share the `embed_config` table with embedding configs, + * distinguished by `kind = 'narrator'`. + * + * Active narrator selection is stored in the `settings` table under the key + * `active_narrator_model_config_id` (integer embed_config.id). + */ + +import type Database from 'better-sqlite3' +import type { NarratorModelConfig, NarratorModelParams } from './types.js' +import { createHash } from 'node:crypto' +import { createChattydeerProvider, createDisabledProvider } from './chattydeerProvider.js' +import type { ChattydeerNarratorProvider } from './chattydeerProvider.js' +import { getActiveSession } from '../db/sqlite.js' + +// --------------------------------------------------------------------------- +// Settings helpers +// --------------------------------------------------------------------------- + +export function getSetting(rawDb: InstanceType, key: string): string | null { + const row = rawDb.prepare(`SELECT value FROM settings WHERE key = ?`).get(key) as { value: string } | undefined + return row?.value ?? 
null +} + +export function setSetting(rawDb: InstanceType, key: string, value: string): void { + rawDb.prepare(`INSERT OR REPLACE INTO settings (key, value) VALUES (?, ?)`).run(key, value) +} + +export function deleteSetting(rawDb: InstanceType, key: string): void { + rawDb.prepare(`DELETE FROM settings WHERE key = ?`).run(key) +} + +// --------------------------------------------------------------------------- +// Narrator config rows +// --------------------------------------------------------------------------- + +interface NarratorRow { + id: number + config_hash: string + provider: string + model: string + params_json: string | null + created_at: number + last_used_at: number | null +} + +function rowToConfig(row: NarratorRow): NarratorModelConfig { + let params: NarratorModelParams = { httpUrl: '' } + if (row.params_json) { + try { + params = JSON.parse(row.params_json) as NarratorModelParams + } catch { + // malformed JSON — leave default + } + } + return { + id: row.id, + name: row.model, + provider: row.provider, + params, + createdAt: row.created_at, + lastUsedAt: row.last_used_at ?? undefined, + } +} + +/** + * List all narrator model configs in the DB. + */ +export function listNarratorConfigs(rawDb: InstanceType): NarratorModelConfig[] { + const tables = rawDb.prepare(`SELECT name FROM sqlite_master WHERE type='table' AND name='embed_config'`).all() as Array<{ name: string }> + if (tables.length === 0) return [] + const rows = rawDb.prepare(`SELECT id, config_hash, provider, model, params_json, created_at, last_used_at FROM embed_config WHERE kind = 'narrator' ORDER BY created_at ASC`).all() as NarratorRow[] + return rows.map(rowToConfig) +} + +/** + * Get a narrator config by its embed_config.id. + */ +export function getNarratorConfigById(rawDb: InstanceType, id: number): NarratorModelConfig | null { + const row = rawDb.prepare(`SELECT id, config_hash, provider, model, params_json, created_at, last_used_at FROM embed_config WHERE id = ? 
AND kind = 'narrator'`).get(id) as NarratorRow | undefined + return row ? rowToConfig(row) : null +} + +/** + * Get a narrator config by model name. + */ +export function getNarratorConfigByName(rawDb: InstanceType, name: string): NarratorModelConfig | null { + const row = rawDb.prepare(`SELECT id, config_hash, provider, model, params_json, created_at, last_used_at FROM embed_config WHERE model = ? AND kind = 'narrator'`).get(name) as NarratorRow | undefined + return row ? rowToConfig(row) : null +} + +/** + * Save a narrator model config to the DB. Returns the embed_config.id. + */ +export function saveNarratorConfig( + rawDb: InstanceType, + name: string, + provider: string, + params: NarratorModelParams, +): number { + // config_hash is a deterministic hash of (kind, name, provider, params) + const hashInput = JSON.stringify({ kind: 'narrator', name, provider, params }) + const configHash = createHash('sha256').update(hashInput).digest('hex') + const now = Math.floor(Date.now() / 1000) + const paramsJson = JSON.stringify(params) + + rawDb.prepare(` + INSERT OR IGNORE INTO embed_config + (config_hash, provider, model, code_model, dimensions, chunker, window_size, overlap, created_at, kind, params_json) + VALUES (?, ?, ?, NULL, 0, 'none', NULL, NULL, ?, 'narrator', ?) + `).run(configHash, provider, name, now, paramsJson) + + // Update params_json on re-add (params may have changed) + rawDb.prepare(`UPDATE embed_config SET params_json = ?, last_used_at = ? WHERE config_hash = ?`) + .run(paramsJson, now, configHash) + + const row = rawDb.prepare(`SELECT id FROM embed_config WHERE config_hash = ?`).get(configHash) as { id: number } + return row.id +} + +/** + * Delete a narrator config by name. + * Returns true if a row was deleted. + */ +export function deleteNarratorConfig(rawDb: InstanceType, name: string): boolean { + const res = rawDb.prepare(`DELETE FROM embed_config WHERE model = ? 
AND kind = 'narrator'`).run(name) + return res.changes > 0 +} + +// --------------------------------------------------------------------------- +// Active narrator selection +// --------------------------------------------------------------------------- + +const ACTIVE_NARRATOR_KEY = 'active_narrator_model_config_id' + +/** + * Get the currently active narrator config ID, or null if not set. + */ +export function getActiveNarratorConfigId(rawDb: InstanceType): number | null { + const val = getSetting(rawDb, ACTIVE_NARRATOR_KEY) + if (val === null) return null + const n = parseInt(val, 10) + return Number.isFinite(n) ? n : null +} + +/** + * Set the active narrator config by embed_config.id. + */ +export function setActiveNarratorConfig(rawDb: InstanceType, id: number): void { + setSetting(rawDb, ACTIVE_NARRATOR_KEY, String(id)) +} + +/** + * Clear the active narrator config selection. + */ +export function clearActiveNarratorConfig(rawDb: InstanceType): void { + deleteSetting(rawDb, ACTIVE_NARRATOR_KEY) +} + +/** + * Get the currently active narrator config object, or null if not set / not found. + */ +export function getActiveNarratorConfig(rawDb: InstanceType): NarratorModelConfig | null { + const id = getActiveNarratorConfigId(rawDb) + if (id === null) return null + return getNarratorConfigById(rawDb, id) +} + +// --------------------------------------------------------------------------- +// Provider resolution +// --------------------------------------------------------------------------- + +/** + * Resolve the active NarratorProvider from the DB session. + * + * Resolution order: + * 1. `narratorModelId` CLI option (explicit embed_config.id) + * 2. `modelName` CLI option (looks up by name) + * 3. Active narrator config from settings table + * 4. 
Disabled (safe-by-default) + */ +export function resolveNarratorProvider(opts: { + narratorModelId?: number + modelName?: string +} = {}): ChattydeerNarratorProvider { + const { rawDb } = getActiveSession() + + let config: NarratorModelConfig | null = null + + if (opts.narratorModelId !== undefined) { + config = getNarratorConfigById(rawDb, opts.narratorModelId) + } else if (opts.modelName) { + config = getNarratorConfigByName(rawDb, opts.modelName) + } else { + config = getActiveNarratorConfig(rawDb) + } + + if (!config) { + return createDisabledProvider() + } + + return createChattydeerProvider(config.name, config.params) +} diff --git a/src/core/narrator/types.ts b/src/core/narrator/types.ts new file mode 100644 index 0000000..9b32d64 --- /dev/null +++ b/src/core/narrator/types.ts @@ -0,0 +1,126 @@ +/** + * Core types for the narrator/explainer subsystem. + * + * NarratorProvider is the interface that all LLM backends must implement. + * The canonical implementation backed by @jsilvanus/chattydeer lives in + * chattydeerProvider.ts. + */ + +// --------------------------------------------------------------------------- +// Provider interface +// --------------------------------------------------------------------------- + +export interface NarrateRequest { + /** System prompt / instruction for the LLM. */ + systemPrompt: string + /** User-facing prompt text (already redacted before reaching the provider). */ + userPrompt: string + /** Soft cap on output tokens (provider may ignore if unsupported). */ + maxTokens?: number +} + +export interface NarrateResponse { + /** Generated narrative text. */ + prose: string + /** Approximate tokens consumed (0 when unavailable). */ + tokensUsed: number + /** Redacted field pattern names that were removed from the payload. */ + redactedFields: string[] + /** True when narration was actually attempted; false in safe-by-default mode. 
*/ + llmEnabled: boolean +} + +/** + * LLM backend that can generate human-readable narrative text. + * Implementations must be safe-by-default (refuse to make network calls + * unless explicitly enabled). + */ +export interface NarratorProvider { + readonly modelName: string + narrate(req: NarrateRequest): Promise + destroy(): Promise +} + +// --------------------------------------------------------------------------- +// Narrator model config (stored in embed_config with kind='narrator') +// --------------------------------------------------------------------------- + +export interface NarratorModelParams { + /** OpenAI-compatible base URL for the LLM endpoint. Required. */ + httpUrl: string + /** Bearer token / API key. Optional. */ + apiKey?: string + /** Max tokens per narration call (default 512). */ + maxTokens?: number + /** Temperature (0 = deterministic, default 0.3). */ + temperature?: number +} + +export interface NarratorModelConfig { + /** embed_config.id in the DB. */ + id: number + /** Human-readable name / local alias (stored as embed_config.model). */ + name: string + /** Provider family (e.g. 'chattydeer', 'http'). */ + provider: string + /** Decoded NarratorModelParams from embed_config.params_json. */ + params: NarratorModelParams + createdAt: number + lastUsedAt?: number +} + +// --------------------------------------------------------------------------- +// CLI / route payloads +// --------------------------------------------------------------------------- + +export type NarrateFocus = 'bugs' | 'features' | 'ops' | 'security' | 'deps' | 'performance' | 'all' +export type NarrateFormat = 'md' | 'text' | 'json' + +export interface NarrateCommandOptions { + since?: string + until?: string + range?: string + focus?: NarrateFocus + format?: NarrateFormat + maxCommits?: number + narratorModelId?: number + /** Raw CLI model override (name, looked up in embed_config by name). 
*/ + model?: string +} + +export interface ExplainCommandOptions { + since?: string + until?: string + log?: string + files?: string + format?: NarrateFormat + narratorModelId?: number + model?: string +} + +// --------------------------------------------------------------------------- +// Commit event (lightweight, extracted from git log) +// --------------------------------------------------------------------------- + +export interface CommitEvent { + hash: string + date: string + authorName: string + subject: string + body: string + /** Heuristic tags assigned during classification */ + tags: string[] +} + +// --------------------------------------------------------------------------- +// Narration output +// --------------------------------------------------------------------------- + +export interface NarrationResult { + prose: string + commitCount: number + citations: string[] + redactedFields: string[] + llmEnabled: boolean + format: NarrateFormat +} diff --git a/src/mcp/server.ts b/src/mcp/server.ts index 99c7f1f..0b25279 100644 --- a/src/mcp/server.ts +++ b/src/mcp/server.ts @@ -11,6 +11,7 @@ import { registerAnalysisTools } from './tools/analysis.js' import { registerClusteringTools } from './tools/clustering.js' import { registerWorkflowTools } from './tools/workflow.js' import { registerInfrastructureTools } from './tools/infrastructure.js' +import { registerNarratorTools } from './tools/narrator.js' export async function startMcpServer(): Promise { const server = new McpServer({ @@ -24,6 +25,7 @@ export async function startMcpServer(): Promise { registerClusteringTools(server) registerWorkflowTools(server) registerInfrastructureTools(server) + registerNarratorTools(server) const transport = new StdioServerTransport() await server.connect(transport) @@ -74,5 +76,7 @@ server.tool('policy_check', ...) server.tool('ownership', ...) server.tool('workflow_run', ...) server.tool('eval', ...) +server.tool('narrate_repo', ...) 
+server.tool('explain_issue_or_error', ...) */ diff --git a/src/mcp/tools/narrator.ts b/src/mcp/tools/narrator.ts new file mode 100644 index 0000000..63e60c8 --- /dev/null +++ b/src/mcp/tools/narrator.ts @@ -0,0 +1,118 @@ +/** + * MCP tool registrations for narrator/explainer functionality. + * + * Tools: + * narrate_repo — generate a narrative of repo development history + * explain_issue_or_error — explain a bug/error topic via git history + */ + +import { z } from 'zod' +import type { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js' +import { resolveNarratorProvider, runNarrate, runExplain } from '../../core/narrator/index.js' + +export function registerNarratorTools(server: McpServer) { + // narrate_repo + server.tool( + 'narrate_repo', + 'Generate a human-readable narrative of repository development history using the configured LLM narrator model. Safe-by-default: returns a placeholder when no narrator model is configured.', + { + since: z.string().optional().describe('Only include commits after this ref or date (e.g. "v1.0", "2024-01-01")'), + until: z.string().optional().describe('Only include commits before this ref or date'), + range: z.string().optional().describe('Git revision range (e.g. 
"v1.0..HEAD")'), + focus: z.enum(['bugs', 'features', 'ops', 'security', 'deps', 'performance', 'all']).optional().default('all').describe('Filter commits by category'), + format: z.enum(['md', 'text', 'json']).optional().default('md').describe('Output format'), + max_commits: z.number().int().positive().optional().describe('Maximum commits to analyse'), + narrator_model_id: z.number().int().positive().optional().describe('embed_config.id of the narrator model to use'), + model: z.string().optional().describe('Narrator model name to use (overrides active selection)'), + }, + async ({ since, until, range, focus, format, max_commits, narrator_model_id, model }) => { + const provider = resolveNarratorProvider({ + narratorModelId: narrator_model_id, + modelName: model, + }) + + try { + const result = await runNarrate(provider, { + since, + until, + range, + focus: focus as 'bugs' | 'features' | 'ops' | 'security' | 'deps' | 'performance' | 'all', + format: format as 'md' | 'text' | 'json', + maxCommits: max_commits, + }) + + let text: string + if (format === 'json') { + text = JSON.stringify({ + prose: result.prose, + commitCount: result.commitCount, + citations: result.citations, + llmEnabled: result.llmEnabled, + }, null, 2) + } else { + text = [ + result.prose, + '', + result.citations.length > 0 ? `Citations: ${result.citations.slice(0, 10).map((h) => h.slice(0, 12)).join(', ')}` : '', + `${result.commitCount} commit(s) analysed`, + result.redactedFields.length > 0 ? `Redacted: ${result.redactedFields.join(', ')}` : '', + ].filter(Boolean).join('\n') + } + + return { content: [{ type: 'text' as const, text }] } + } finally { + await provider.destroy() + } + }, + ) + + // explain_issue_or_error + server.tool( + 'explain_issue_or_error', + 'Explain a bug, error, or concept by tracing it through git history using the configured LLM narrator model. 
Returns a timeline with commit citations.', + { + topic: z.string().min(1).describe('The bug, error message, or concept to explain (e.g. "NullPointerException in auth handler")'), + since: z.string().optional().describe('Only include commits after this ref or date'), + until: z.string().optional().describe('Only include commits before this ref or date'), + format: z.enum(['md', 'text', 'json']).optional().default('md').describe('Output format'), + narrator_model_id: z.number().int().positive().optional().describe('embed_config.id of the narrator model to use'), + model: z.string().optional().describe('Narrator model name to use (overrides active selection)'), + }, + async ({ topic, since, until, format, narrator_model_id, model }) => { + const provider = resolveNarratorProvider({ + narratorModelId: narrator_model_id, + modelName: model, + }) + + try { + const result = await runExplain(provider, topic, { + since, + until, + format: format as 'md' | 'text' | 'json', + }) + + let text: string + if (format === 'json') { + text = JSON.stringify({ + prose: result.prose, + commitCount: result.commitCount, + citations: result.citations, + llmEnabled: result.llmEnabled, + }, null, 2) + } else { + text = [ + result.prose, + '', + result.citations.length > 0 ? `Citations: ${result.citations.slice(0, 10).map((h) => h.slice(0, 12)).join(', ')}` : '', + `${result.commitCount} related commit(s) found`, + result.redactedFields.length > 0 ? 
`Redacted: ${result.redactedFields.join(', ')}` : '', + ].filter(Boolean).join('\n') + } + + return { content: [{ type: 'text' as const, text }] } + } finally { + await provider.destroy() + } + }, + ) +} diff --git a/src/server/app.ts b/src/server/app.ts index c05ba72..e06a7a4 100644 --- a/src/server/app.ts +++ b/src/server/app.ts @@ -57,6 +57,7 @@ import { analysisRouter } from './routes/analysis.js' import { watchRouter } from './routes/watch.js' import { projectionsRouter } from './routes/projections.js' import { openapiRouter } from './routes/openapi.js' +import { narratorRouter } from './routes/narrator.js' import { getActiveSession } from '../core/db/sqlite.js' import { readFileSync, existsSync } from 'node:fs' import { join, dirname } from 'node:path' @@ -165,6 +166,9 @@ export function createApp(options: AppOptions): Express { app.use(`${base}/projections`, projectionsRouter()) + // Narrator routes (LLM-powered narrative generation) + app.use(`${base}`, narratorRouter) + // Phase 64: Capabilities manifest — machine-readable list of server capabilities app.get(`${base}/capabilities`, (_req, res) => { res.json({ diff --git a/src/server/routes/narrator.ts b/src/server/routes/narrator.ts new file mode 100644 index 0000000..55e6f14 --- /dev/null +++ b/src/server/routes/narrator.ts @@ -0,0 +1,121 @@ +/** + * HTTP routes for narrator/explainer endpoints. + * + * Routes (under /api/v1/): + * POST /narrate — generate a narrative of repository development history + * POST /explain — explain a bug/error topic by tracing through git history + * + * Both routes use the DB-backed narrator model config system. + * Safe-by-default: returns a placeholder when no narrator model is configured. 
+ */ + +import { Router } from 'express' +import { z } from 'zod' +import { resolveNarratorProvider, runNarrate, runExplain } from '../../core/narrator/index.js' + +export const narratorRouter = Router() + +// --------------------------------------------------------------------------- +// Schema +// --------------------------------------------------------------------------- + +const NarrateBodySchema = z.object({ + since: z.string().optional(), + until: z.string().optional(), + range: z.string().optional(), + focus: z.enum(['bugs', 'features', 'ops', 'security', 'deps', 'performance', 'all']).optional().default('all'), + format: z.enum(['md', 'text', 'json']).optional().default('md'), + maxCommits: z.number().int().positive().optional(), + narratorModelId: z.number().int().positive().optional(), + model: z.string().optional(), +}) + +const ExplainBodySchema = z.object({ + topic: z.string().min(1), + since: z.string().optional(), + until: z.string().optional(), + format: z.enum(['md', 'text', 'json']).optional().default('md'), + narratorModelId: z.number().int().positive().optional(), + model: z.string().optional(), +}) + +// --------------------------------------------------------------------------- +// POST /narrate +// --------------------------------------------------------------------------- + +narratorRouter.post('/narrate', async (req, res) => { + const parsed = NarrateBodySchema.safeParse(req.body) + if (!parsed.success) { + res.status(400).json({ error: 'Invalid request body', details: parsed.error.flatten() }) + return + } + + const body = parsed.data + const provider = resolveNarratorProvider({ + narratorModelId: body.narratorModelId, + modelName: body.model, + }) + + try { + const result = await runNarrate(provider, { + since: body.since, + until: body.until, + range: body.range, + focus: body.focus, + format: body.format, + maxCommits: body.maxCommits, + }) + res.json({ + prose: result.prose, + commitCount: result.commitCount, + citations: 
result.citations, + redactedFields: result.redactedFields, + llmEnabled: result.llmEnabled, + format: result.format, + }) + } catch (err) { + const msg = err instanceof Error ? err.message : String(err) + res.status(500).json({ error: msg }) + } finally { + await provider.destroy() + } +}) + +// --------------------------------------------------------------------------- +// POST /explain +// --------------------------------------------------------------------------- + +narratorRouter.post('/explain', async (req, res) => { + const parsed = ExplainBodySchema.safeParse(req.body) + if (!parsed.success) { + res.status(400).json({ error: 'Invalid request body', details: parsed.error.flatten() }) + return + } + + const body = parsed.data + const provider = resolveNarratorProvider({ + narratorModelId: body.narratorModelId, + modelName: body.model, + }) + + try { + const result = await runExplain(provider, body.topic, { + since: body.since, + until: body.until, + format: body.format, + }) + res.json({ + prose: result.prose, + commitCount: result.commitCount, + citations: result.citations, + redactedFields: result.redactedFields, + llmEnabled: result.llmEnabled, + format: result.format, + }) + } catch (err) { + const msg = err instanceof Error ? err.message : String(err) + res.status(500).json({ error: msg }) + } finally { + await provider.destroy() + } +}) diff --git a/tests/narratorConfig.test.ts b/tests/narratorConfig.test.ts new file mode 100644 index 0000000..4046946 --- /dev/null +++ b/tests/narratorConfig.test.ts @@ -0,0 +1,196 @@ +/** + * Tests for DB-backed narrator model config. 
+ * + * Covers: + * - saveNarratorConfig / listNarratorConfigs / getNarratorConfigByName / deleteNarratorConfig + * - setActiveNarratorConfig / getActiveNarratorConfig / clearActiveNarratorConfig + * - resolveNarratorProvider (disabled when unconfigured) + * - embed_config kind filtering (narrator configs don't appear in embedding lists) + */ +import { describe, it, expect, afterEach, vi } from 'vitest' +import { openDatabaseAt } from '../src/core/db/sqlite.js' +import { + listNarratorConfigs, + getNarratorConfigById, + getNarratorConfigByName, + saveNarratorConfig, + deleteNarratorConfig, + getActiveNarratorConfig, + getActiveNarratorConfigId, + setActiveNarratorConfig, + clearActiveNarratorConfig, +} from '../src/core/narrator/resolveNarrator.js' + +// --------------------------------------------------------------------------- +// Test DB setup +// --------------------------------------------------------------------------- + +const testSession = openDatabaseAt(':memory:') +const rawDb = testSession.rawDb + +// --------------------------------------------------------------------------- +// Narrator config CRUD +// --------------------------------------------------------------------------- + +describe('listNarratorConfigs()', () => { + afterEach(() => { + rawDb.exec(`DELETE FROM embed_config WHERE kind = 'narrator'`) + rawDb.exec(`DELETE FROM settings`) + }) + + it('returns empty array when no narrator configs exist', () => { + const configs = listNarratorConfigs(rawDb) + expect(configs).toEqual([]) + }) + + it('saves and retrieves a narrator config', () => { + saveNarratorConfig(rawDb, 'my-narrator', 'chattydeer', { httpUrl: 'http://localhost:8080', apiKey: 'tok' }) + const configs = listNarratorConfigs(rawDb) + expect(configs).toHaveLength(1) + expect(configs[0].name).toBe('my-narrator') + expect(configs[0].provider).toBe('chattydeer') + expect(configs[0].params.httpUrl).toBe('http://localhost:8080') + expect(configs[0].params.apiKey).toBe('tok') + }) + + 
it('does not include embedding configs in narrator list', () => { + // Insert an embedding-kind row manually + rawDb.exec(` + INSERT INTO embed_config (config_hash, provider, model, dimensions, chunker, created_at, kind) + VALUES ('hash_emb', 'ollama', 'nomic-embed-text', 768, 'file', ${Math.floor(Date.now() / 1000)}, 'embedding') + `) + const configs = listNarratorConfigs(rawDb) + expect(configs.every((c) => c.provider !== 'ollama')).toBe(true) + }) + + it('returns multiple narrator configs ordered by created_at', () => { + saveNarratorConfig(rawDb, 'narrator-a', 'chattydeer', { httpUrl: 'http://a.example.com' }) + saveNarratorConfig(rawDb, 'narrator-b', 'chattydeer', { httpUrl: 'http://b.example.com' }) + const configs = listNarratorConfigs(rawDb) + expect(configs).toHaveLength(2) + expect(configs[0].name).toBe('narrator-a') + expect(configs[1].name).toBe('narrator-b') + }) +}) + +describe('getNarratorConfigByName()', () => { + afterEach(() => { + rawDb.exec(`DELETE FROM embed_config WHERE kind = 'narrator'`) + rawDb.exec(`DELETE FROM settings`) + }) + + it('returns null for unknown name', () => { + expect(getNarratorConfigByName(rawDb, 'ghost')).toBeNull() + }) + + it('returns config for known name', () => { + saveNarratorConfig(rawDb, 'known', 'chattydeer', { httpUrl: 'http://x.example.com' }) + const config = getNarratorConfigByName(rawDb, 'known') + expect(config).not.toBeNull() + expect(config!.name).toBe('known') + }) +}) + +describe('getNarratorConfigById()', () => { + afterEach(() => { + rawDb.exec(`DELETE FROM embed_config WHERE kind = 'narrator'`) + rawDb.exec(`DELETE FROM settings`) + }) + + it('returns null for unknown id', () => { + expect(getNarratorConfigById(rawDb, 99999)).toBeNull() + }) + + it('returns config for known id', () => { + const id = saveNarratorConfig(rawDb, 'by-id', 'chattydeer', { httpUrl: 'http://y.example.com' }) + const config = getNarratorConfigById(rawDb, id) + expect(config).not.toBeNull() + expect(config!.id).toBe(id) + }) 
+}) + +describe('deleteNarratorConfig()', () => { + afterEach(() => { + rawDb.exec(`DELETE FROM embed_config WHERE kind = 'narrator'`) + rawDb.exec(`DELETE FROM settings`) + }) + + it('returns false when no config to delete', () => { + expect(deleteNarratorConfig(rawDb, 'ghost')).toBe(false) + }) + + it('removes the config and returns true', () => { + saveNarratorConfig(rawDb, 'del-me', 'chattydeer', { httpUrl: 'http://z.example.com' }) + expect(deleteNarratorConfig(rawDb, 'del-me')).toBe(true) + expect(getNarratorConfigByName(rawDb, 'del-me')).toBeNull() + }) +}) + +// --------------------------------------------------------------------------- +// Active narrator selection +// --------------------------------------------------------------------------- + +describe('active narrator selection', () => { + afterEach(() => { + rawDb.exec(`DELETE FROM embed_config WHERE kind = 'narrator'`) + rawDb.exec(`DELETE FROM settings`) + }) + + it('returns null when no active narrator is set', () => { + expect(getActiveNarratorConfigId(rawDb)).toBeNull() + expect(getActiveNarratorConfig(rawDb)).toBeNull() + }) + + it('returns the active config after setActiveNarratorConfig', () => { + const id = saveNarratorConfig(rawDb, 'active-test', 'chattydeer', { httpUrl: 'http://active.example.com' }) + setActiveNarratorConfig(rawDb, id) + expect(getActiveNarratorConfigId(rawDb)).toBe(id) + const active = getActiveNarratorConfig(rawDb) + expect(active).not.toBeNull() + expect(active!.name).toBe('active-test') + }) + + it('returns null after clearActiveNarratorConfig', () => { + const id = saveNarratorConfig(rawDb, 'clear-test', 'chattydeer', { httpUrl: 'http://clear.example.com' }) + setActiveNarratorConfig(rawDb, id) + clearActiveNarratorConfig(rawDb) + expect(getActiveNarratorConfigId(rawDb)).toBeNull() + expect(getActiveNarratorConfig(rawDb)).toBeNull() + }) + + it('can switch the active narrator by setting a new id', () => { + const id1 = saveNarratorConfig(rawDb, 'first', 'chattydeer', 
{ httpUrl: 'http://first.example.com' }) + const id2 = saveNarratorConfig(rawDb, 'second', 'chattydeer', { httpUrl: 'http://second.example.com' }) + setActiveNarratorConfig(rawDb, id1) + expect(getActiveNarratorConfig(rawDb)!.name).toBe('first') + setActiveNarratorConfig(rawDb, id2) + expect(getActiveNarratorConfig(rawDb)!.name).toBe('second') + }) +}) + +// --------------------------------------------------------------------------- +// resolveNarratorProvider (via mock session) +// --------------------------------------------------------------------------- + +describe('resolveNarratorProvider()', () => { + afterEach(() => { + rawDb.exec(`DELETE FROM embed_config WHERE kind = 'narrator'`) + rawDb.exec(`DELETE FROM settings`) + vi.restoreAllMocks() + }) + + it('returns a disabled provider when no narrator model is configured', async () => { + // Mock getActiveSession to use our in-memory DB + vi.doMock('../src/core/db/sqlite.js', async (importOriginal) => { + const actual = await importOriginal() + return { ...actual, getActiveSession: () => testSession } + }) + const { resolveNarratorProvider: resolveNarrator } = await import('../src/core/narrator/resolveNarrator.js') + const provider = resolveNarrator({}) + expect(provider.modelName).toBe('narrator') + + const res = await provider.narrate({ systemPrompt: 'sys', userPrompt: 'user' }) + expect(res.llmEnabled).toBe(false) + expect(res.prose).toContain('narrator disabled') + }) +}) diff --git a/tests/narratorRedact.test.ts b/tests/narratorRedact.test.ts new file mode 100644 index 0000000..508060c --- /dev/null +++ b/tests/narratorRedact.test.ts @@ -0,0 +1,158 @@ +/** + * Tests for src/core/narrator/redact.ts + * + * Verifies that the redaction layer catches common secret patterns + * before content is sent to a remote LLM provider. 
+ */ +import { describe, it, expect } from 'vitest' +import { redact, redactAll } from '../src/core/narrator/redact.js' + +// --------------------------------------------------------------------------- +// redact() +// --------------------------------------------------------------------------- + +describe('redact()', () => { + it('passes through clean text unchanged', () => { + const { text, firedPatterns } = redact('Hello, world! This is a normal commit message.') + expect(text).toBe('Hello, world! This is a normal commit message.') + expect(firedPatterns).toHaveLength(0) + }) + + it('redacts AWS access keys', () => { + const { text, firedPatterns } = redact('AWS key: AKIAIOSFODNN7EXAMPLE is used here') + expect(text).toContain('[REDACTED:aws-access-key]') + expect(text).not.toContain('AKIAIOSFODNN7EXAMPLE') + expect(firedPatterns).toContain('aws-access-key') + }) + + it('redacts GitHub PATs (ghp_ prefix)', () => { + const token = 'ghp_' + 'A'.repeat(36) + const { text, firedPatterns } = redact(`Using token: ${token}`) + expect(text).not.toContain(token) + expect(text).toContain('[REDACTED:github-pat]') + expect(firedPatterns).toContain('github-pat') + }) + + it('redacts OpenAI sk- keys', () => { + const key = 'sk-' + 'x'.repeat(48) + const { text, firedPatterns } = redact(`API_KEY=${key}`) + expect(text).not.toContain(key) + expect(text).toContain('[REDACTED:openai-key]') + expect(firedPatterns).toContain('openai-key') + }) + + it('redacts Google API keys (AIza prefix)', () => { + const key = 'AIza' + 'B'.repeat(35) + const { text, firedPatterns } = redact(`Setting key=${key}`) + expect(text).not.toContain(key) + expect(text).toContain('[REDACTED:google-api-key]') + expect(firedPatterns).toContain('google-api-key') + }) + + it('redacts JWT tokens', () => { + // Valid-shape JWT (three base64url segments) + const jwt = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIn0.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c' + const { text, firedPatterns } = 
redact(`Authorization: Bearer ${jwt}`) + expect(text).not.toContain(jwt) + expect(text).toContain('[REDACTED:jwt]') + expect(firedPatterns).toContain('jwt') + }) + + it('redacts PEM private key blocks', () => { + const pem = '-----BEGIN RSA PRIVATE KEY-----\nMIIE...\n-----END RSA PRIVATE KEY-----' + const { text, firedPatterns } = redact(pem) + expect(text).not.toContain('MIIE') + expect(text).toContain('[REDACTED:pem-private-key]') + expect(firedPatterns).toContain('pem-private-key') + }) + + it('redacts generic SECRET= assignments', () => { + const { text, firedPatterns } = redact('SECRET=supersecretpassword123') + expect(text).not.toContain('supersecretpassword123') + expect(firedPatterns).toContain('env-secret') + }) + + it('redacts TOKEN= assignments', () => { + const { text, firedPatterns } = redact('TOKEN=mytoken123456') + expect(text).not.toContain('mytoken123456') + expect(firedPatterns).toContain('env-secret') + }) + + it('redacts private IP addresses', () => { + const { text, firedPatterns } = redact('Server at 192.168.1.100 is down') + expect(text).not.toContain('192.168.1.100') + expect(text).toContain('[REDACTED:private-ip]') + expect(firedPatterns).toContain('private-ip') + }) + + it('redacts 10.x.x.x private IP range', () => { + const { text, firedPatterns } = redact('Database host: 10.0.0.1') + expect(text).not.toContain('10.0.0.1') + expect(firedPatterns).toContain('private-ip') + }) + + it('redacts email addresses', () => { + const { text, firedPatterns } = redact('Contact: alice@example.com for access') + expect(text).not.toContain('alice@example.com') + expect(text).toContain('[REDACTED:email]') + expect(firedPatterns).toContain('email') + }) + + it('reports multiple fired patterns when multiple secrets present', () => { + const key = 'sk-' + 'z'.repeat(48) + const { firedPatterns } = redact(`Token: ${key} and email: bob@corp.io`) + expect(firedPatterns).toContain('openai-key') + expect(firedPatterns).toContain('email') + }) + + 
it('deduplicates fired patterns when the same secret appears twice', () => { + const key = 'sk-' + 'y'.repeat(48) + const { firedPatterns } = redact(`Key1=${key} Key2=${key}`) + const openAiPatterns = firedPatterns.filter((p) => p === 'openai-key') + expect(openAiPatterns).toHaveLength(1) + }) + + it('does not flag public IP addresses', () => { + const { firedPatterns } = redact('Public IP: 8.8.8.8 is Google DNS') + expect(firedPatterns).not.toContain('private-ip') + }) +}) + +// --------------------------------------------------------------------------- +// redactAll() +// --------------------------------------------------------------------------- + +describe('redactAll()', () => { + it('processes an array of strings', () => { + const key = 'sk-' + 'a'.repeat(48) + const { texts, firedPatterns } = redactAll([ + 'First commit message', + `API_KEY=${key}`, + 'Another clean message', + ]) + expect(texts).toHaveLength(3) + expect(texts[0]).toBe('First commit message') + expect(texts[1]).not.toContain(key) + expect(texts[2]).toBe('Another clean message') + expect(firedPatterns).toContain('openai-key') + }) + + it('returns empty firedPatterns for all-clean inputs', () => { + const { texts, firedPatterns } = redactAll(['Clean text', 'Also clean']) + expect(firedPatterns).toHaveLength(0) + expect(texts[0]).toBe('Clean text') + }) + + it('handles an empty array', () => { + const { texts, firedPatterns } = redactAll([]) + expect(texts).toHaveLength(0) + expect(firedPatterns).toHaveLength(0) + }) + + it('deduplicates patterns across multiple strings', () => { + const key = 'sk-' + 'b'.repeat(48) + const { firedPatterns } = redactAll([`key1=${key}`, `key2=${key}`]) + const matches = firedPatterns.filter((p) => p === 'openai-key') + expect(matches).toHaveLength(1) + }) +}) diff --git a/tests/narratorSmoke.test.ts b/tests/narratorSmoke.test.ts new file mode 100644 index 0000000..78052db --- /dev/null +++ b/tests/narratorSmoke.test.ts @@ -0,0 +1,133 @@ +/** + * Smoke tests for 
gitsema narrate / gitsema explain CLI handlers. + * + * Uses a mock NarratorProvider to avoid LLM calls. + * Verifies output shape and safe-by-default behavior. + */ +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest' +import { runNarrate, runExplain } from '../src/core/narrator/narrator.js' +import type { NarratorProvider, NarrateRequest, NarrateResponse } from '../src/core/narrator/types.js' + +// --------------------------------------------------------------------------- +// Mock NarratorProvider +// --------------------------------------------------------------------------- + +function makeMockProvider(enabled: boolean): NarratorProvider { + return { + modelName: 'mock-narrator', + async narrate(req: NarrateRequest): Promise<NarrateResponse> { + if (!enabled) { + return { + prose: '[LLM narrator disabled]', + tokensUsed: 0, + redactedFields: [], + llmEnabled: false, + } + } + return { + prose: `Mock narrative for: ${req.userPrompt.slice(0, 50)}`, + tokensUsed: 42, + redactedFields: [], + llmEnabled: true, + } + }, + async destroy(): Promise<void> {}, + } +} + +// --------------------------------------------------------------------------- +// runNarrate() +// --------------------------------------------------------------------------- + +describe('runNarrate()', () => { + it('returns safe placeholder when provider is disabled (no commits in range)', async () => { + const provider = makeMockProvider(false) + // No commits in the future — ensures empty result + const result = await runNarrate(provider, { + since: '2099-01-01', + until: '2099-01-02', + focus: 'all', + format: 'md', + }) + // With no commits, should return early without calling the provider + expect(result.commitCount).toBe(0) + expect(result.prose).toContain('No commits matched') + expect(result.llmEnabled).toBe(false) + }) + + it('returns a NarrationResult with required fields', async () => { + const provider = makeMockProvider(false) + const result = await runNarrate(provider, { focus: 'all', 
format: 'md', maxCommits: 0 }) + expect(result).toHaveProperty('prose') + expect(result).toHaveProperty('commitCount') + expect(result).toHaveProperty('citations') + expect(result).toHaveProperty('redactedFields') + expect(result).toHaveProperty('llmEnabled') + expect(result).toHaveProperty('format') + expect(Array.isArray(result.citations)).toBe(true) + expect(Array.isArray(result.redactedFields)).toBe(true) + }) + + it('uses the format option', async () => { + const provider = makeMockProvider(false) + const mdResult = await runNarrate(provider, { format: 'md', since: '2099-01-01' }) + expect(mdResult.format).toBe('md') + + const jsonResult = await runNarrate(provider, { format: 'json', since: '2099-01-01' }) + expect(jsonResult.format).toBe('json') + }) +}) + +// --------------------------------------------------------------------------- +// runExplain() +// --------------------------------------------------------------------------- + +describe('runExplain()', () => { + it('returns a NarrationResult for a topic with no matching commits', async () => { + const provider = makeMockProvider(true) + // Use a very unlikely error string to get zero matches + const result = await runExplain(provider, 'xyzzythiscannotexist_98765', { + format: 'md', + since: '2099-01-01', + }) + expect(result).toHaveProperty('prose') + expect(result).toHaveProperty('commitCount') + expect(result.format).toBe('md') + }) + + it('returns format=json when requested', async () => { + const provider = makeMockProvider(false) + const result = await runExplain(provider, 'some error', { format: 'json', since: '2099-01-01' }) + expect(result.format).toBe('json') + }) + + it('includes citations array in result', async () => { + const provider = makeMockProvider(true) + const result = await runExplain(provider, 'test', { format: 'md', since: '2099-01-01' }) + expect(Array.isArray(result.citations)).toBe(true) + }) +}) + +// --------------------------------------------------------------------------- 
+// NarrationResult shape invariants +// --------------------------------------------------------------------------- + +describe('NarrationResult shape', () => { + it('citations is always an array', async () => { + const provider = makeMockProvider(false) + const res = await runNarrate(provider, { since: '2099-01-01', format: 'text' }) + expect(Array.isArray(res.citations)).toBe(true) + }) + + it('redactedFields is always an array', async () => { + const provider = makeMockProvider(false) + const res = await runNarrate(provider, { since: '2099-01-01', format: 'text' }) + expect(Array.isArray(res.redactedFields)).toBe(true) + }) + + it('llmEnabled is always a boolean', async () => { + const provider = makeMockProvider(false) + const res = await runNarrate(provider, { since: '2099-01-01', format: 'text' }) + expect(typeof res.llmEnabled).toBe('boolean') + }) +}) diff --git a/yarn.lock b/yarn.lock index 488dd9c..628b64d 100644 --- a/yarn.lock +++ b/yarn.lock @@ -88,10 +88,25 @@ resolved "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz" integrity sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og== -"@jsilvanus/embedeer@^1.0.2": - version "1.2.1" - resolved "https://registry.npmjs.org/@jsilvanus/embedeer/-/embedeer-1.2.1.tgz" - integrity sha512-BGcf5+LHsXMDSbhavG36k/EekSnCIpjbY0kQBHdn+rYxHUC6WKbZ6FbZ+ThZmc0Gz8ime1I/nOFmhfKWgERFnw== +"@jsilvanus/chattydeer@^0.2.0": + version "0.2.0" + resolved "https://registry.npmjs.org/@jsilvanus/chattydeer/-/chattydeer-0.2.0.tgz" + integrity sha512-Tl55/conB6xllSyf9y7zG3zXxRZp7AamnnJv1wmU/Mk2WlbsPKTh8J8LMaymIFu3tXmoBtuS2Q/3aTC0jsqPjQ== + dependencies: + "@huggingface/transformers" "^4.0.1" + "@jsilvanus/embedeer" "1.3.1" + +"@jsilvanus/embedeer@^1.3.2": + version "1.3.2" + resolved "https://registry.npmjs.org/@jsilvanus/embedeer/-/embedeer-1.3.2.tgz" + integrity sha512-7MGHSzxkLPGtwEylAgKok2DEsxvSZC6PrBLCoPfsDEAi4tpGUoSQNnfTfzqSjvStuiNLrIsvsCvfZpoEllJ83Q== 
+ dependencies: + "@huggingface/transformers" "^4.0.1" + +"@jsilvanus/embedeer@1.3.1": + version "1.3.1" + resolved "https://registry.npmjs.org/@jsilvanus/embedeer/-/embedeer-1.3.1.tgz" + integrity sha512-5jOImrV30+S0p5dUjiGHOXP6tZq3iWhaOsfv1awczJJlouWtzR33buUVTa7oTNuL/juUzvA+JFqLNlExj3Un5g== dependencies: "@huggingface/transformers" "^4.0.1" From b4dbc1b303bc5606945598aae952660710175cd5 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 9 Apr 2026 16:50:15 +0000 Subject: [PATCH 4/5] fix: address code review comments (redact simplification, constants) Agent-Logs-Url: https://github.com/jsilvanus/gitsema/sessions/4539623d-376d-41b2-99f6-72a14cfe3a9e Co-authored-by: jsilvanus <22452468+jsilvanus@users.noreply.github.com> --- src/cli/commands/models.ts | 7 +++++++ src/core/narrator/chattydeerProvider.ts | 8 ++++---- src/core/narrator/narrator.ts | 12 ++++-------- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/src/cli/commands/models.ts b/src/cli/commands/models.ts index 3ffad11..49be2d1 100644 --- a/src/cli/commands/models.ts +++ b/src/cli/commands/models.ts @@ -561,6 +561,13 @@ export async function modelsRemoveCommand( } } +// --------------------------------------------------------------------------- +// Narrator model management subcommands +// --------------------------------------------------------------------------- + +/** Default provider family for narrator model configs. 
*/ +const DEFAULT_NARRATOR_PROVIDER = 'chattydeer' + // --------------------------------------------------------------------------- // models narrator-list — list narrator model configs (kind='narrator' in embed_config) // --------------------------------------------------------------------------- diff --git a/src/core/narrator/chattydeerProvider.ts b/src/core/narrator/chattydeerProvider.ts index 35f75a6..9bc3c4d 100644 --- a/src/core/narrator/chattydeerProvider.ts +++ b/src/core/narrator/chattydeerProvider.ts @@ -126,7 +126,7 @@ export class ChattydeerNarratorProvider implements NarratorProvider { maxTokens, }) - const prose = result.explanation === 'INSUFFICIENT_EVIDENCE' + const prose = result.explanation === INSUFFICIENT_EVIDENCE_SENTINEL ? '(narrator: insufficient evidence — no meaningful content to summarise)' : result.explanation @@ -192,9 +192,9 @@ function buildHttpGenerateFn(params: NarratorModelParams) { } } -// --------------------------------------------------------------------------- -// Factory helpers -// --------------------------------------------------------------------------- +const INSUFFICIENT_EVIDENCE_SENTINEL = 'INSUFFICIENT_EVIDENCE' + + /** * Create a disabled-mode provider (safe-by-default, no network calls). diff --git a/src/core/narrator/narrator.ts b/src/core/narrator/narrator.ts index a27b6c4..18bfbc1 100644 --- a/src/core/narrator/narrator.ts +++ b/src/core/narrator/narrator.ts @@ -203,10 +203,8 @@ export async function runNarrate( // 4. 
Final narrative const finalPrompt = buildFinalNarrativePrompt(batchSummaries, events, focus) - const { text: redactedFinal, firedPatterns } = (() => { - const r = redactAll([finalPrompt]) - return { text: r.texts[0], firedPatterns: r.firedPatterns } - })() + const { texts: finalTexts, firedPatterns } = redactAll([finalPrompt]) + const redactedFinal = finalTexts[0] for (const p of firedPatterns) { if (!allRedacted.includes(p)) allRedacted.push(p) } const finalRes = await provider.narrate({ @@ -274,10 +272,8 @@ export async function runExplain( `\nPlease provide:\n1. A timeline of when this issue appeared.\n2. Likely introduction commit(s) with hashes.\n3. Any fix attempts with commit hashes.\n4. Current status (resolved / ongoing).\nLabel inferences clearly. Cite commit hashes in square brackets.`, ].filter(Boolean).join('\n') - const { text: redactedPrompt, firedPatterns } = redactAll([userPrompt]).texts.reduce( - (acc, t, i) => ({ text: t, firedPatterns: redactAll([userPrompt]).firedPatterns }), - { text: userPrompt, firedPatterns: [] as string[] }, - ) + const { texts: explainTexts, firedPatterns } = redactAll([userPrompt]) + const redactedPrompt = explainTexts[0] const res = await provider.narrate({ systemPrompt: 'You are a software incident analyst. 
Be factual and cite commit hashes for every claim.', From bb71f70d4ac069511b96ed5d359f69c903acbc96 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 9 Apr 2026 18:13:31 +0000 Subject: [PATCH 5/5] feat: evidence-only mode, models --narrator/--guide, gitsema guide, guide HTTP route, chattydeer contract Agent-Logs-Url: https://github.com/jsilvanus/gitsema/sessions/c93ecd61-9000-45c1-b32f-2ac61e99b4c2 Co-authored-by: jsilvanus <22452468+jsilvanus@users.noreply.github.com> --- docs/chattydeer_contract.md | 201 +++++++++ src/cli/commands/guide.ts | 216 +++++++++ src/cli/commands/models.ts | 124 +++-- src/cli/commands/narrate.ts | 27 +- src/cli/register/all.ts | 4 + src/cli/register/setup.ts | 74 ++- src/core/narrator/narrator.ts | 45 +- src/core/narrator/resolveNarrator.ts | 101 +++++ src/core/narrator/types.ts | 13 + src/mcp/tools/narrator.ts | 51 ++- src/server/app.ts | 4 + src/server/routes/guide.ts | 58 +++ yarn.lock | 653 ++++++++++++++++++++++++++- 13 files changed, 1475 insertions(+), 96 deletions(-) create mode 100644 docs/chattydeer_contract.md create mode 100644 src/cli/commands/guide.ts create mode 100644 src/server/routes/guide.ts diff --git a/docs/chattydeer_contract.md b/docs/chattydeer_contract.md new file mode 100644 index 0000000..aa768a5 --- /dev/null +++ b/docs/chattydeer_contract.md @@ -0,0 +1,201 @@ +# `@jsilvanus/chattydeer` — Contract for gitsema-guide function calling + +> This document describes the additional API surface that gitsema requires from +> `@jsilvanus/chattydeer` to power the `gitsema guide` interactive chat with +> full function-call (tool-call) execution. +> The current chattydeer version (`0.2.0`) satisfies narration/explain. +> Everything in this document is **pending** — it will be implemented in +> chattydeer once this contract is agreed. + +--- + +## Background + +`gitsema guide` needs an **agentic loop**: + +1. User asks a question. +2. 
The LLM decides which gitsema tools to call (e.g. `semantic_search`, + `recent_commits`, `file-evolution`). +3. chattydeer executes the tool calls and feeds results back. +4. Repeat until the LLM returns a final answer. + +The current chattydeer `Explainer` API (single-shot text generation) is +sufficient for narrate/explain but cannot support this loop. + +--- + +## Required additions to `@jsilvanus/chattydeer` + +### 1. `ChatSession` — multi-turn conversation object + +```typescript +interface ChatMessage { + role: 'system' | 'user' | 'assistant' | 'tool' + content: string + /** Present when role='tool' — the name of the tool that produced this result. */ + toolName?: string + /** Present when role='assistant' — the tool calls the LLM wants to make. */ + toolCalls?: ToolCall[] +} + +interface ToolCall { + id: string // unique per call (passed back in tool result) + name: string // tool name (e.g. 'semantic_search') + arguments: Record<string, unknown> // parsed JSON args +} + +interface ChatSession { + messages: ChatMessage[] + append(msg: ChatMessage): void +} +``` + +### 2. `ChatCompletionProvider` — streaming + tool-calling chat + +```typescript +interface ToolDefinition { + name: string + description: string + /** JSON Schema object describing the parameters. */ + parameters: Record<string, unknown> +} + +interface ChatCompletionRequest { + session: ChatSession + tools?: ToolDefinition[] + /** Max tokens for this completion turn. */ + maxTokens?: number + /** Temperature (0 = deterministic). */ + temperature?: number + /** If true, stream token deltas via AsyncIterable. */ + stream?: boolean +} + +interface ChatCompletionResponse { + message: ChatMessage // the assistant's response (text or tool_calls) + tokensUsed: number + finishReason: 'stop' | 'tool_calls' | 'length' | 'error' +} + +interface ChatCompletionProvider { + /** Single completion turn (non-streaming). */ + complete(req: ChatCompletionRequest): Promise<ChatCompletionResponse> + /** Streaming completion; yields token deltas until finish. 
*/ + stream(req: ChatCompletionRequest): AsyncIterable<{ delta: string; done: boolean }> + destroy(): Promise<void> +} +``` + +### 3. `AgentLoop` — agentic tool-execution helper + +```typescript +interface AgentLoopOptions { + provider: ChatCompletionProvider + tools: ToolDefinition[] + /** Callback invoked by AgentLoop to execute a tool call. */ + executeTool(call: ToolCall): Promise<string> + /** Maximum number of LLM → tool → LLM roundtrips (default: 5). */ + maxRoundtrips?: number + /** Callback for observing intermediate messages (optional). */ + onMessage?: (msg: ChatMessage) => void +} + +interface AgentLoopResult { + answer: string // final assistant text + messages: ChatMessage[] // full conversation history + roundtrips: number // number of tool-call roundtrips used +} + +/** Run an agentic loop until the LLM produces a final answer (no more tool_calls). */ +function runAgentLoop(session: ChatSession, opts: AgentLoopOptions): Promise<AgentLoopResult> +``` + +### 4. Factory function + +```typescript +/** + * Create a ChatCompletionProvider backed by any OpenAI-compatible endpoint. + * + * @param httpUrl Base URL (e.g. 'https://api.openai.com') + * @param model Model name (e.g. 'gpt-4o-mini') + * @param apiKey Optional bearer token + */ +function createChatProvider( + httpUrl: string, + model: string, + apiKey?: string, +): ChatCompletionProvider +``` + +### 5. OpenAI-compatible `/v1/chat/completions` pass-through (optional) + +For `gitsema tools serve` to expose a full OpenAI-compatible HTTP endpoint, +chattydeer should optionally provide an Express middleware / handler: + +```typescript +/** + * Returns an Express RequestHandler that proxies POST /v1/chat/completions + * requests to the configured provider, with optional tool injection. + * + * gitsema uses this to expose its tool registry as OpenAI function calls + * to any compatible client (e.g. Claude Desktop, Continue.dev). 
+ */ +function createOpenAiChatHandler( + provider: ChatCompletionProvider, + tools?: ToolDefinition[], + executeTool?: (call: ToolCall) => Promise<string>, +): RequestHandler +``` + +--- + +## gitsema tool registry (to be exposed as function calls) + +The following gitsema internal tools will be registered with the agent loop: + +| Tool name | Description | Key parameters | +|-----------------------|-----------------------------------------------|--------------------------| +| `semantic_search` | Vector similarity search over git history | `query`, `topK` | +| `recent_commits` | Fetch N most recent commits | `n` | +| `file_evolution` | Semantic drift of a single file | `path`, `since`, `until` | +| `concept_evolution` | Concept drift across the codebase | `query`, `topK` | +| `repo_stats` | Basic repository statistics | — | +| `narrate_repo` | Return commit evidence for a range | `since`, `until`, `focus`| +| `explain_topic` | Return commits matching a topic | `topic`, `since`, `until`| +| `branch_summary` | Semantic summary of a branch vs main | `branch` | + +--- + +## Redaction requirement + +Before any user/tool content is sent to a remote provider, the chattydeer +`AgentLoop` must support a `redactContent` hook: + +```typescript +interface AgentLoopOptions { + // ... (existing fields) + /** Optional: called on every message before it leaves gitsema. Modify in place. */ + redactContent?: (text: string) => string +} +``` + +gitsema will wire `redactAll` from `src/core/narrator/redact.ts` here. + +--- + +## Versioning + +- Targeting chattydeer `>= 0.3.0` for `ChatCompletionProvider` + `AgentLoop`. +- OpenAI pass-through handler targeted for `>= 0.4.0`. +- gitsema will pin `@jsilvanus/chattydeer@^0.3.0` once these are released. + +--- + +## Acceptance criteria for gitsema + +- [ ] `gitsema guide "What does the auth module do?"` performs semantic_search, feeds results to LLM, returns answer. 
+- [ ] `gitsema guide --interactive` supports multi-turn conversation with tool calls. +- [ ] `POST /api/v1/guide/chat` uses the agent loop (same tools, same redaction). +- [ ] `POST /v1/chat/completions` on the gitsema HTTP server is OpenAI-compatible. +- [ ] All tool call arguments and results are redacted before leaving the process. +- [ ] Agent loop terminates within `maxRoundtrips` (default 5). diff --git a/src/cli/commands/guide.ts b/src/cli/commands/guide.ts new file mode 100644 index 0000000..ba9dc61 --- /dev/null +++ b/src/cli/commands/guide.ts @@ -0,0 +1,216 @@ +/** + * `gitsema guide` — interactive LLM chat with access to gitsema tools. + * + * The guide uses the active "guide" model config (kind='guide' in embed_config), + * falling back to the active narrator model. It builds a context from gitsema + * search results and recent git history, then asks the LLM to answer the + * user's question. + * + * For multi-turn / function-call capable execution, see docs/chattydeer_contract.md. + * The current implementation does a single context-enriched Q&A. + * + * Safe-by-default: if no guide or narrator model is configured the command + * prints the gathered context without calling an LLM. + */ + +import type { Command } from 'commander' +import { execSync } from 'node:child_process' +import { createInterface } from 'node:readline' +import { resolveGuideProvider } from '../../core/narrator/resolveNarrator.js' +import { redactAll } from '../../core/narrator/redact.js' +import { logger } from '../../utils/logger.js' + +// --------------------------------------------------------------------------- +// Context gathering helpers +// --------------------------------------------------------------------------- + +interface GuideTool { + name: string + description: string + call: (args: Record) => string +} + +/** Registry of built-in gitsema tools available to the guide. 
*/ +const GUIDE_TOOLS: GuideTool[] = [ + { + name: 'recent_commits', + description: 'Fetch the N most recent git commits (subject + hash + date)', + call: ({ n = '20' }) => { + try { + return execSync(`git log --max-count=${n} --format="%H %ai %s"`, { + cwd: process.cwd(), + encoding: 'utf8', + maxBuffer: 2 * 1024 * 1024, + }).trim() + } catch { + return '(git log failed)' + } + }, + }, + { + name: 'repo_stats', + description: 'Basic repository statistics (branches, tags, total commits)', + call: () => { + try { + const branches = execSync('git branch --list | wc -l', { encoding: 'utf8' }).trim() + const tags = execSync('git tag --list | wc -l', { encoding: 'utf8' }).trim() + const commits = execSync('git rev-list --count HEAD 2>/dev/null || echo 0', { encoding: 'utf8' }).trim() + const remotes = execSync('git remote -v 2>/dev/null | head -4', { encoding: 'utf8' }).trim() + return `Branches: ${branches} Tags: ${tags} Commits: ${commits}\nRemotes:\n${remotes}` + } catch { + return '(stats unavailable)' + } + }, + }, +] + +/** Gather quick context for the guide prompt. */ +function gatherContext(question: string): string { + const parts: string[] = [] + + // Recent commits + const recentTool = GUIDE_TOOLS.find((t) => t.name === 'recent_commits')! + parts.push(`## Recent commits\n${recentTool.call({ n: '15' })}`) + + // Repo stats + const statsTool = GUIDE_TOOLS.find((t) => t.name === 'repo_stats')! 
+ parts.push(`## Repository stats\n${statsTool.call({})}`) + + return parts.join('\n\n') +} + +// --------------------------------------------------------------------------- +// Core guide Q&A +// --------------------------------------------------------------------------- + +export async function runGuide(question: string, opts: { + guideModelId?: number + model?: string + includeContext?: boolean +}): Promise<{ answer: string; contextUsed: boolean; llmEnabled: boolean }> { + const includeContext = opts.includeContext !== false + + // Build context + const context = includeContext ? gatherContext(question) : '' + + // Resolve guide provider (guide model → narrator model → disabled) + const provider = resolveGuideProvider({ + guideModelId: opts.guideModelId, + modelName: opts.model, + }) + + try { + if (!provider) { + const answer = [ + '# Repository Context', + '', + context || '(no context gathered)', + '', + '---', + '', + `**Question:** ${question}`, + '', + '> No guide or narrator model configured. Run `gitsema models add-guide --http-url --activate` to enable LLM answers.', + ].join('\n') + return { answer, contextUsed: includeContext, llmEnabled: false } + } + + // Build system + user prompt + const systemPrompt = [ + 'You are gitsema-guide, an expert assistant for the git repository the user is working in.', + 'You have access to repository context gathered by gitsema tools.', + 'Answer questions about the codebase, history, and development patterns.', + 'Always cite commit hashes when referencing specific changes.', + 'Be concise, factual, and mention when you are uncertain.', + ].join('\n') + + const rawUserPrompt = context + ? 
`Repository context:\n${context}\n\n---\n\nQuestion: ${question}` + : `Question: ${question}` + + const { texts, firedPatterns } = redactAll([rawUserPrompt]) + const userPrompt = texts[0] + if (firedPatterns.length > 0) { + logger.info(`[guide] redacted ${firedPatterns.length} pattern(s) from prompt`) + } + + const res = await provider.narrate({ + systemPrompt, + userPrompt, + maxTokens: 1024, + }) + + return { answer: res.prose, contextUsed: includeContext, llmEnabled: res.llmEnabled } + } finally { + await provider.destroy() + } +} + +// --------------------------------------------------------------------------- +// CLI: single-shot Q&A +// --------------------------------------------------------------------------- + +export async function guideCommand( + question: string | undefined, + opts: { + guideModelId?: string + model?: string + noContext?: boolean + interactive?: boolean + }, +): Promise { + const guideModelId = opts.guideModelId !== undefined ? parseInt(opts.guideModelId, 10) : undefined + const includeContext = !opts.noContext + + // Interactive mode: read questions from stdin line-by-line + if (opts.interactive || (!question && process.stdin.isTTY)) { + const rl = createInterface({ input: process.stdin, output: process.stdout }) + console.log('gitsema guide — type your question (Ctrl-C or empty line to exit)\n') + rl.prompt() + rl.on('line', async (line) => { + const q = line.trim() + if (!q) { rl.close(); return } + const { answer, llmEnabled } = await runGuide(q, { guideModelId, model: opts.model, includeContext }) + console.log(`\n${answer}\n`) + if (!llmEnabled) { + console.log('(No LLM model configured — showing context only.)\n') + } + rl.prompt() + }) + rl.on('close', () => process.exit(0)) + return + } + + // Single-shot + const q = question ?? 
'' + if (!q) { + console.error('Usage: gitsema guide <question> [options]') + console.error(' gitsema guide --interactive') + process.exit(1) + } + + const { answer, llmEnabled } = await runGuide(q, { guideModelId, model: opts.model, includeContext }) + console.log(answer) + if (!llmEnabled) { + console.error('\n(No LLM model configured — run `gitsema models add-guide --http-url <url> --activate` to enable.)') + } +} + +// --------------------------------------------------------------------------- +// Registration helper +// --------------------------------------------------------------------------- + +export function registerGuideCommand(program: Command): void { + program + .command('guide [question]') + .description( + 'Interactive LLM chat that answers questions about this repository. ' + + 'Uses the active guide model (or narrator model as fallback). ' + + 'Prints gathered context even when no LLM is configured.', + ) + .option('--guide-model-id <id>', 'embed_config.id of the guide model to use') + .option('--model <name>', 'guide/narrator model name to use') + .option('--no-context', 'skip gathering git context (faster but less accurate)') + .option('-i, --interactive', 'start an interactive REPL session (one question per line)') + .action(guideCommand) +} diff --git a/src/cli/commands/models.ts b/src/cli/commands/models.ts index 49be2d1..76d233d 100644 --- a/src/cli/commands/models.ts +++ b/src/cli/commands/models.ts @@ -562,23 +562,33 @@ export async function modelsRemoveCommand( } // --------------------------------------------------------------------------- -// Narrator model management subcommands +// Narrator / Guide model management (kind-aware, DB-backed) // --------------------------------------------------------------------------- -/** Default provider family for narrator model configs. */ +/** Default provider for narrator/guide model configs (LLM chat completions). 
*/ const DEFAULT_NARRATOR_PROVIDER = 'chattydeer' +type NarratorKind = 'narrator' | 'guide' + +/** Helper that resolves imports and DB for narrator/guide operations. */ +async function getNarratorDb() { + const { getRawDb } = await import('../../core/db/sqlite.js') + const rawDb = getRawDb() + const resolver = await import('../../core/narrator/resolveNarrator.js') + return { rawDb, resolver } +} + // --------------------------------------------------------------------------- -// models narrator-list — list narrator model configs (kind='narrator' in embed_config) +// models list --narrator / --guide // --------------------------------------------------------------------------- -export async function modelsNarratorListCommand(opts: { json?: boolean } = {}): Promise { +export async function modelsKindListCommand(kind: NarratorKind, opts: { json?: boolean } = {}): Promise { try { - const { getRawDb } = await import('../../core/db/sqlite.js') - const rawDb = getRawDb() - const { listNarratorConfigs, getActiveNarratorConfigId } = await import('../../core/narrator/resolveNarrator.js') - const configs = listNarratorConfigs(rawDb) - const activeId = getActiveNarratorConfigId(rawDb) + const { rawDb, resolver } = await getNarratorDb() + const configs = kind === 'narrator' ? resolver.listNarratorConfigs(rawDb) : resolver.listGuideConfigs(rawDb) + const activeId = kind === 'narrator' + ? 
resolver.getActiveNarratorConfigId(rawDb) + : resolver.getActiveGuideConfigId(rawDb) if (opts.json) { console.log(JSON.stringify(configs.map((c) => ({ ...c, active: c.id === activeId })), null, 2)) @@ -586,9 +596,9 @@ export async function modelsNarratorListCommand(opts: { json?: boolean } = {}): } if (configs.length === 0) { - console.log('No narrator model configs found.') + console.log(`No ${kind} model configs found.`) console.log('') - console.log('Add one: gitsema models narrator-add --http-url [--key ]') + console.log(`Add one: gitsema models add --${kind} --http-url [--key ]`) return } @@ -600,15 +610,20 @@ export async function modelsNarratorListCommand(opts: { json?: boolean } = {}): console.log(`${String(c.id).padEnd(4)} ${c.name.padEnd(30)} ${c.provider.padEnd(12)} ${active.padEnd(7)} ${url}`) } console.log('') - console.log(`${configs.length} narrator model(s). Active ID: ${activeId ?? '(none)'}`) + console.log(`${configs.length} ${kind} model(s). Active ID: ${activeId ?? '(none)'}`) } catch (err) { console.error(`Error: ${err instanceof Error ? err.message : String(err)}`) process.exit(1) } } +/** @deprecated Use `modelsKindListCommand('narrator')` instead. 
*/ +export async function modelsNarratorListCommand(opts: { json?: boolean } = {}): Promise { + return modelsKindListCommand('narrator', opts) +} + // --------------------------------------------------------------------------- -// models narrator-add — add / update a narrator model config +// models add --narrator / --guide // --------------------------------------------------------------------------- export interface ModelsNarratorAddOptions { @@ -619,8 +634,9 @@ export interface ModelsNarratorAddOptions { activate?: boolean } -export async function modelsNarratorAddCommand( +export async function modelsKindAddCommand( name: string, + kind: NarratorKind, opts: ModelsNarratorAddOptions, ): Promise { if (!name || !name.trim()) { @@ -628,14 +644,12 @@ export async function modelsNarratorAddCommand( process.exit(1) } if (!opts.httpUrl) { - console.error('Error: --http-url is required for narrator models') + console.error(`Error: --http-url is required for ${kind} models`) process.exit(1) } try { - const { getRawDb } = await import('../../core/db/sqlite.js') - const rawDb = getRawDb() - const { saveNarratorConfig, setActiveNarratorConfig } = await import('../../core/narrator/resolveNarrator.js') + const { rawDb, resolver } = await getNarratorDb() const params = { httpUrl: opts.httpUrl, @@ -644,17 +658,20 @@ export async function modelsNarratorAddCommand( ...(opts.temperature ? { temperature: parseFloat(opts.temperature) } : {}), } - const id = saveNarratorConfig(rawDb, name.trim(), 'chattydeer', params) - console.log(`Saved narrator model config '${name}' (id=${id}).`) - console.log(` Provider: chattydeer`) + const saveFn = kind === 'narrator' ? resolver.saveNarratorConfig : resolver.saveGuideConfig + const activateFn = kind === 'narrator' ? 
resolver.setActiveNarratorConfig : resolver.setActiveGuideConfig + const id = saveFn(rawDb, name.trim(), DEFAULT_NARRATOR_PROVIDER, params) + + console.log(`Saved ${kind} model config '${name}' (id=${id}).`) + console.log(` Provider: ${DEFAULT_NARRATOR_PROVIDER}`) console.log(` HTTP URL: ${opts.httpUrl}`) if (opts.key) console.log(` API key: (set)`) if (opts.activate) { - setActiveNarratorConfig(rawDb, id) - console.log(` Activated as default narrator (id=${id}).`) + activateFn(rawDb, id) + console.log(` Activated as default ${kind} (id=${id}).`) } else { - console.log(` To activate: gitsema models narrator-activate ${name}`) + console.log(` To activate: gitsema models activate ${name} --${kind}`) } } catch (err) { console.error(`Error: ${err instanceof Error ? err.message : String(err)}`) @@ -662,62 +679,77 @@ export async function modelsNarratorAddCommand( } } +/** @deprecated Use `modelsKindAddCommand(name, 'narrator', opts)` instead. */ +export async function modelsNarratorAddCommand(name: string, opts: ModelsNarratorAddOptions): Promise { + return modelsKindAddCommand(name, 'narrator', opts) +} + // --------------------------------------------------------------------------- -// models narrator-activate — set the active narrator model +// models activate --narrator / --guide // --------------------------------------------------------------------------- -export async function modelsNarratorActivateCommand(name: string): Promise { +export async function modelsKindActivateCommand(name: string, kind: NarratorKind): Promise { if (!name || !name.trim()) { console.error('Error: model name is required') process.exit(1) } try { - const { getRawDb } = await import('../../core/db/sqlite.js') - const rawDb = getRawDb() - const { getNarratorConfigByName, setActiveNarratorConfig } = await import('../../core/narrator/resolveNarrator.js') - const config = getNarratorConfigByName(rawDb, name.trim()) + const { rawDb, resolver } = await getNarratorDb() + const getByName = kind 
=== 'narrator' ? resolver.getNarratorConfigByName : resolver.getGuideConfigByName + const activateFn = kind === 'narrator' ? resolver.setActiveNarratorConfig : resolver.setActiveGuideConfig + const config = getByName(rawDb, name.trim()) if (!config) { - console.error(`Error: no narrator model config found for '${name}'.`) - console.error(`Run: gitsema models narrator-list`) + console.error(`Error: no ${kind} model config found for '${name}'.`) + console.error(`Run: gitsema models list --${kind}`) process.exit(1) } - setActiveNarratorConfig(rawDb, config.id) - console.log(`Narrator model '${name}' (id=${config.id}) is now active.`) + activateFn(rawDb, config.id) + console.log(`${kind.charAt(0).toUpperCase() + kind.slice(1)} model '${name}' (id=${config.id}) is now active.`) } catch (err) { console.error(`Error: ${err instanceof Error ? err.message : String(err)}`) process.exit(1) } } +/** @deprecated Use `modelsKindActivateCommand(name, 'narrator')` instead. */ +export async function modelsNarratorActivateCommand(name: string): Promise { + return modelsKindActivateCommand(name, 'narrator') +} + // --------------------------------------------------------------------------- -// models narrator-remove — remove a narrator model config +// models remove --narrator / --guide // --------------------------------------------------------------------------- -export async function modelsNarratorRemoveCommand(name: string): Promise { +export async function modelsKindRemoveCommand(name: string, kind: NarratorKind): Promise { if (!name || !name.trim()) { console.error('Error: model name is required') process.exit(1) } try { - const { getRawDb } = await import('../../core/db/sqlite.js') - const rawDb = getRawDb() - const { deleteNarratorConfig, getActiveNarratorConfig, clearActiveNarratorConfig } = await import('../../core/narrator/resolveNarrator.js') + const { rawDb, resolver } = await getNarratorDb() + const getActive = kind === 'narrator' ? 
resolver.getActiveNarratorConfig : resolver.getActiveGuideConfig + const deleteFn = kind === 'narrator' ? resolver.deleteNarratorConfig : resolver.deleteGuideConfig + const clearFn = kind === 'narrator' ? resolver.clearActiveNarratorConfig : resolver.clearActiveGuideConfig - // Check if it's the active narrator — clear if so - const active = getActiveNarratorConfig(rawDb) - const removed = deleteNarratorConfig(rawDb, name.trim()) + const active = getActive(rawDb) + const removed = deleteFn(rawDb, name.trim()) if (removed) { if (active?.name === name.trim()) { - clearActiveNarratorConfig(rawDb) - console.log(`Removed narrator model config '${name}' (was active — selection cleared).`) + clearFn(rawDb) + console.log(`Removed ${kind} model config '${name}' (was active — selection cleared).`) } else { - console.log(`Removed narrator model config '${name}'.`) + console.log(`Removed ${kind} model config '${name}'.`) } } else { - console.log(`No narrator model config found for '${name}'.`) + console.log(`No ${kind} model config found for '${name}'.`) } } catch (err) { console.error(`Error: ${err instanceof Error ? err.message : String(err)}`) process.exit(1) } } + +/** @deprecated Use `modelsKindRemoveCommand(name, 'narrator')` instead. 
*/ +export async function modelsNarratorRemoveCommand(name: string): Promise { + return modelsKindRemoveCommand(name, 'narrator') +} diff --git a/src/cli/commands/narrate.ts b/src/cli/commands/narrate.ts index 26df2a1..6f9d46d 100644 --- a/src/cli/commands/narrate.ts +++ b/src/cli/commands/narrate.ts @@ -17,6 +17,11 @@ import type { NarrateFocus, NarrateFormat, NarrationResult } from '../../core/na // --------------------------------------------------------------------------- function formatResult(result: NarrationResult): string { + // Evidence-only mode: just print the JSON (commits) + if (result.evidence !== undefined) { + return result.prose // already JSON-serialised by runNarrate/runExplain + } + const { prose, commitCount, citations, llmEnabled, format } = result if (!llmEnabled) { @@ -83,8 +88,12 @@ export async function narrateCommand( maxCommits?: string narratorModelId?: string model?: string + narrate?: boolean + evidenceOnly?: boolean }, ): Promise { + // --narrate is shorthand for --no-evidence-only; default is evidence-only + const evidenceOnly = opts.narrate ? false : (opts.evidenceOnly !== false) const narratorModelId = opts.narratorModelId !== undefined ? parseInt(opts.narratorModelId, 10) : undefined const provider = resolveNarratorProvider({ narratorModelId, @@ -100,6 +109,7 @@ export async function narrateCommand( focus: (opts.focus as NarrateFocus) ?? 'all', format: (opts.format as NarrateFormat) ?? 'md', maxCommits: opts.maxCommits ? parseInt(opts.maxCommits, 10) : undefined, + evidenceOnly, }) } finally { await provider.destroy() @@ -122,8 +132,12 @@ export async function explainCommand( format?: string narratorModelId?: string model?: string + narrate?: boolean + evidenceOnly?: boolean }, ): Promise { + // --narrate is shorthand for --no-evidence-only; default is evidence-only + const evidenceOnly = opts.narrate ? false : (opts.evidenceOnly !== false) const narratorModelId = opts.narratorModelId !== undefined ? 
parseInt(opts.narratorModelId, 10) : undefined const provider = resolveNarratorProvider({ narratorModelId, @@ -138,6 +152,7 @@ export async function explainCommand( log: opts.log, files: opts.files, format: (opts.format as NarrateFormat) ?? 'md', + evidenceOnly, }) } finally { await provider.destroy() @@ -153,7 +168,7 @@ export async function explainCommand( export function registerNarratorCommands(program: Command): void { program .command('narrate') - .description('Generate a human-readable narrative of repository development history using an LLM narrator model.') + .description('Return commit evidence (default) or an LLM-generated narrative of repository development history.') .option('--since ', 'only include commits after this ref or date') .option('--until ', 'only include commits before this ref or date') .option('--range ', 'git revision range (e.g. v1.0..HEAD)') @@ -162,21 +177,25 @@ export function registerNarratorCommands(program: Command): void { 'filter commits by area: bugs, features, ops, security, deps, performance, all (default: all)', 'all', ) - .option('--format ', 'output format: md, text, json (default: md)', 'md') + .option('--format ', 'output format when narrating: md, text, json (default: md)', 'md') .option('--max-commits ', 'maximum commits to analyse (default: 500)') .option('--narrator-model-id ', 'embed_config.id of the narrator model to use (overrides active selection)') .option('--model ', 'narrator model name to use (overrides active selection)') + .option('--narrate', 'call the LLM narrator and return prose (default: return evidence only)') + .option('--evidence-only', 'return raw commit evidence without calling the LLM (this is the default)') .action(narrateCommand) program .command('explain ') - .description('Explain a bug, error, or topic by tracing it through git history using an LLM narrator model.') + .description('Return matching commits (default) or an LLM-generated timeline for a bug, error, or topic.') .option('--since ', 
'only include commits after this ref or date') .option('--until ', 'only include commits before this ref or date') .option('--log ', 'path to an error log or stack trace file to include as context') .option('--files ', 'restrict search to files matching this glob') - .option('--format ', 'output format: md, text, json (default: md)', 'md') + .option('--format ', 'output format when narrating: md, text, json (default: md)', 'md') .option('--narrator-model-id ', 'embed_config.id of the narrator model to use (overrides active selection)') .option('--model ', 'narrator model name to use (overrides active selection)') + .option('--narrate', 'call the LLM narrator and return prose (default: return evidence only)') + .option('--evidence-only', 'return raw matching commits without calling the LLM (this is the default)') .action(explainCommand) } diff --git a/src/cli/register/all.ts b/src/cli/register/all.ts index e146385..cb28d9b 100644 --- a/src/cli/register/all.ts +++ b/src/cli/register/all.ts @@ -50,6 +50,7 @@ import { regressionGateCommand } from '../commands/regressionGate.js' import { crossRepoSimilarityCommand } from '../commands/crossRepoSimilarity.js' import { codeReviewCommand } from '../commands/codeReview.js' import { registerNarratorCommands } from '../commands/narrate.js' +import { registerGuideCommand } from '../commands/guide.js' export function registerAll(program: Command) { // Preserve per-domain registration modules @@ -541,6 +542,9 @@ export function registerAll(program: Command) { // Narrator commands: narrate + explain (LLM-powered, DB-backed model config) registerNarratorCommands(program) + + // Guide command: interactive LLM chat with gitsema tool access + registerGuideCommand(program) } export default registerAll diff --git a/src/cli/register/setup.ts b/src/cli/register/setup.ts index e9867ab..3f96220 100644 --- a/src/cli/register/setup.ts +++ b/src/cli/register/setup.ts @@ -16,6 +16,10 @@ import { modelsNarratorAddCommand, 
modelsNarratorActivateCommand, modelsNarratorRemoveCommand, + modelsKindListCommand, + modelsKindAddCommand, + modelsKindActivateCommand, + modelsKindRemoveCommand, } from '../commands/models.js' import { collectOut } from '../../utils/outputSink.js' @@ -208,20 +212,33 @@ Examples: }) // --------------------------------------------------------------------------- - // Narrator model management subcommands (DB-backed, kind='narrator') + // Unified narrator / guide model management (--narrator | --guide flag) // --------------------------------------------------------------------------- + // models list [--narrator] [--guide] + // (extends existing list subcommand with optional kind flag) modelsSub - .command('narrator-list') - .description('List narrator model configs (stored in DB with kind=narrator)') + .command('list-narrator') + .alias('narrator-list') + .description('List narrator model configs (kind=narrator). Alias: models list --narrator') .option('--json', 'output as JSON') .action(async (opts: { json?: boolean }) => { - await modelsNarratorListCommand(opts) + await modelsKindListCommand('narrator', opts) }) modelsSub - .command('narrator-add ') - .description('Add or update a narrator model config (stored in DB, backed by chattydeer)') + .command('list-guide') + .description('List guide model configs (kind=guide). Alias: models list --guide') + .option('--json', 'output as JSON') + .action(async (opts: { json?: boolean }) => { + await modelsKindListCommand('guide', opts) + }) + + // models add --narrator / --guide + modelsSub + .command('add-narrator ') + .alias('narrator-add') + .description('Add/update a narrator model config (--narrator shorthand). 
Use --activate to set as default.') .option('--http-url ', 'OpenAI-compatible base URL for chat completions (required)') .option('--key ', 'API key / Bearer token') .option('--max-tokens ', 'max tokens per narration call (default: 512)') @@ -231,20 +248,53 @@ Examples: name: string, opts: { httpUrl?: string; key?: string; maxTokens?: string; temperature?: string; activate?: boolean }, ) => { - await modelsNarratorAddCommand(name, { httpUrl: opts.httpUrl ?? '', key: opts.key, maxTokens: opts.maxTokens, temperature: opts.temperature, activate: opts.activate }) + await modelsKindAddCommand(name, 'narrator', { httpUrl: opts.httpUrl ?? '', key: opts.key, maxTokens: opts.maxTokens, temperature: opts.temperature, activate: opts.activate }) }) modelsSub - .command('narrator-activate ') - .description('Set a narrator model as the active default (resolved by gitsema narrate / gitsema explain)') + .command('add-guide ') + .description('Add/update a guide model config. Guide models power gitsema guide interactive chat.') + .option('--http-url ', 'OpenAI-compatible base URL for chat completions (required)') + .option('--key ', 'API key / Bearer token') + .option('--max-tokens ', 'max tokens per guide call (default: 512)') + .option('--temperature ', 'temperature (default: 0.3)') + .option('--activate', 'set this as the active guide model immediately') + .action(async ( + name: string, + opts: { httpUrl?: string; key?: string; maxTokens?: string; temperature?: string; activate?: boolean }, + ) => { + await modelsKindAddCommand(name, 'guide', { httpUrl: opts.httpUrl ?? 
'', key: opts.key, maxTokens: opts.maxTokens, temperature: opts.temperature, activate: opts.activate }) + }) + + // models activate --narrator / --guide + modelsSub + .command('activate-narrator ') + .alias('narrator-activate') + .description('Set a narrator model as the active default (used by gitsema narrate / explain)') .action(async (name: string) => { - await modelsNarratorActivateCommand(name) + await modelsKindActivateCommand(name, 'narrator') }) modelsSub - .command('narrator-remove ') + .command('activate-guide ') + .description('Set a guide model as the active default (used by gitsema guide)') + .action(async (name: string) => { + await modelsKindActivateCommand(name, 'guide') + }) + + // models remove --narrator / --guide + modelsSub + .command('remove-narrator ') + .alias('narrator-remove') .description('Remove a narrator model config from the DB') .action(async (name: string) => { - await modelsNarratorRemoveCommand(name) + await modelsKindRemoveCommand(name, 'narrator') + }) + + modelsSub + .command('remove-guide ') + .description('Remove a guide model config from the DB') + .action(async (name: string) => { + await modelsKindRemoveCommand(name, 'guide') }) } diff --git a/src/core/narrator/narrator.ts b/src/core/narrator/narrator.ts index 18bfbc1..4dcd9dd 100644 --- a/src/core/narrator/narrator.ts +++ b/src/core/narrator/narrator.ts @@ -164,6 +164,7 @@ export async function runNarrate( ): Promise { const focus = opts.focus ?? 'all' const format = opts.format ?? 'md' + const evidenceOnly = opts.evidenceOnly !== false // default true // 1. Fetch commits const allEvents = fetchCommitEvents({ @@ -184,10 +185,24 @@ export async function runNarrate( redactedFields: [], llmEnabled: false, format, + evidence: [], } } - // 3. Batch + map-reduce + // 3. 
Evidence-only: return raw commits without calling LLM + if (evidenceOnly) { + return { + prose: JSON.stringify(events, null, 2), + commitCount: events.length, + citations: events.slice(0, 20).map((e) => e.hash), + redactedFields: [], + llmEnabled: false, + format, + evidence: events, + } + } + + // 4. Batch + map-reduce (LLM path) const batches: CommitEvent[][] = [] for (let i = 0; i < events.length; i += BATCH_SIZE) { batches.push(events.slice(i, i + BATCH_SIZE)) @@ -201,7 +216,7 @@ export async function runNarrate( batchSummaries.push(summary) } - // 4. Final narrative + // 5. Final narrative const finalPrompt = buildFinalNarrativePrompt(batchSummaries, events, focus) const { texts: finalTexts, firedPatterns } = redactAll([finalPrompt]) const redactedFinal = finalTexts[0] @@ -235,6 +250,7 @@ export async function runExplain( opts: ExplainCommandOptions, ): Promise { const format = opts.format ?? 'md' + const evidenceOnly = opts.evidenceOnly !== false // default true // 1. Fetch commits const allEvents = fetchCommitEvents({ @@ -260,7 +276,28 @@ export async function runExplain( } } - // 4. Build explain prompt + const citations = relevant.slice(0, 20).map((e) => e.hash) + + // 4. Evidence-only: return raw matched commits without calling LLM + if (evidenceOnly) { + const evidenceObj = { + topic, + relevantCommits: relevant.slice(0, 30), + totalSearched: allEvents.length, + logContentIncluded: logContent.length > 0, + } + return { + prose: JSON.stringify(evidenceObj, null, 2), + commitCount: relevant.length, + citations, + redactedFields: [], + llmEnabled: false, + format, + evidence: relevant.slice(0, 30), + } + } + + // 5. 
Build explain prompt (LLM path) const commitLines = relevant.slice(0, 30).map((e) => `[${e.hash.slice(0, 8)}] ${e.date.slice(0, 10)} ${e.subject}`, ) @@ -281,8 +318,6 @@ export async function runExplain( maxTokens: 512, }) - const citations = relevant.slice(0, 20).map((e) => e.hash) - return { prose: res.prose, commitCount: relevant.length, diff --git a/src/core/narrator/resolveNarrator.ts b/src/core/narrator/resolveNarrator.ts index d392ed9..153faa2 100644 --- a/src/core/narrator/resolveNarrator.ts +++ b/src/core/narrator/resolveNarrator.ts @@ -203,3 +203,104 @@ export function resolveNarratorProvider(opts: { return createChattydeerProvider(config.name, config.params) } + +// --------------------------------------------------------------------------- +// Guide model config (kind='guide') — same infrastructure as narrator +// --------------------------------------------------------------------------- + +const ACTIVE_GUIDE_KEY = 'active_guide_model_config_id' + +/** List all guide model configs (kind='guide'). */ +export function listGuideConfigs(rawDb: InstanceType): NarratorModelConfig[] { + const tables = rawDb.prepare(`SELECT name FROM sqlite_master WHERE type='table' AND name='embed_config'`).all() as Array<{ name: string }> + if (tables.length === 0) return [] + const rows = rawDb.prepare(`SELECT id, config_hash, provider, model, params_json, created_at, last_used_at FROM embed_config WHERE kind = 'guide' ORDER BY created_at ASC`).all() as NarratorRow[] + return rows.map(rowToConfig) +} + +/** Get the active guide config ID from settings. */ +export function getActiveGuideConfigId(rawDb: InstanceType): number | null { + const val = getSetting(rawDb, ACTIVE_GUIDE_KEY) + if (val === null) return null + const n = parseInt(val, 10) + return Number.isFinite(n) ? n : null +} + +/** Get the active guide config object. 
*/ +export function getActiveGuideConfig(rawDb: InstanceType): NarratorModelConfig | null { + const id = getActiveGuideConfigId(rawDb) + if (id === null) return null + return getNarratorConfigById(rawDb, id) +} + +/** Set the active guide config by embed_config.id. */ +export function setActiveGuideConfig(rawDb: InstanceType, id: number): void { + setSetting(rawDb, ACTIVE_GUIDE_KEY, String(id)) +} + +/** Clear the active guide config selection. */ +export function clearActiveGuideConfig(rawDb: InstanceType): void { + deleteSetting(rawDb, ACTIVE_GUIDE_KEY) +} + +/** Get a guide config by name. */ +export function getGuideConfigByName(rawDb: InstanceType, name: string): NarratorModelConfig | null { + const row = rawDb.prepare(`SELECT id, config_hash, provider, model, params_json, created_at, last_used_at FROM embed_config WHERE model = ? AND kind = 'guide'`).get(name) as NarratorRow | undefined + return row ? rowToConfig(row) : null +} + +/** Save a guide model config. Returns embed_config.id. */ +export function saveGuideConfig( + rawDb: InstanceType, + name: string, + provider: string, + params: NarratorModelParams, +): number { + const hashInput = JSON.stringify({ kind: 'guide', name, provider, params }) + const configHash = createHash('sha256').update(hashInput).digest('hex') + const now = Math.floor(Date.now() / 1000) + const paramsJson = JSON.stringify(params) + + rawDb.prepare(` + INSERT OR IGNORE INTO embed_config + (config_hash, provider, model, code_model, dimensions, chunker, window_size, overlap, created_at, kind, params_json) + VALUES (?, ?, ?, NULL, 0, 'none', NULL, NULL, ?, 'guide', ?) + `).run(configHash, provider, name, now, paramsJson) + + rawDb.prepare(`UPDATE embed_config SET params_json = ?, last_used_at = ? WHERE config_hash = ?`) + .run(paramsJson, now, configHash) + + const row = rawDb.prepare(`SELECT id FROM embed_config WHERE config_hash = ?`).get(configHash) as { id: number } + return row.id +} + +/** Delete a guide config by name. 
Returns true if deleted. */ +export function deleteGuideConfig(rawDb: InstanceType, name: string): boolean { + const res = rawDb.prepare(`DELETE FROM embed_config WHERE model = ? AND kind = 'guide'`).run(name) + return res.changes > 0 +} + +/** Resolve the active guide NarratorProvider from the DB. Falls back to narrator config, then disabled. */ +export function resolveGuideProvider(opts: { + guideModelId?: number + modelName?: string +} = {}): ChattydeerNarratorProvider { + const { rawDb } = getActiveSession() + + let config: NarratorModelConfig | null = null + + if (opts.guideModelId !== undefined) { + config = getNarratorConfigById(rawDb, opts.guideModelId) + } else if (opts.modelName) { + config = getGuideConfigByName(rawDb, opts.modelName) ?? getNarratorConfigByName(rawDb, opts.modelName) + } else { + // Prefer guide config; fall back to narrator config + config = getActiveGuideConfig(rawDb) ?? getActiveNarratorConfig(rawDb) + } + + if (!config) { + return createDisabledProvider() + } + + return createChattydeerProvider(config.name, config.params) +} diff --git a/src/core/narrator/types.ts b/src/core/narrator/types.ts index 9b32d64..929d8b1 100644 --- a/src/core/narrator/types.ts +++ b/src/core/narrator/types.ts @@ -86,6 +86,12 @@ export interface NarrateCommandOptions { narratorModelId?: number /** Raw CLI model override (name, looked up in embed_config by name). */ model?: string + /** + * When true (the default), skip the LLM call and return the raw commit + * evidence so the caller (or an MCP agent) can narrate/filter itself. + * Set to false (or pass --narrate on CLI) to call the configured LLM. + */ + evidenceOnly?: boolean } export interface ExplainCommandOptions { @@ -96,6 +102,8 @@ export interface ExplainCommandOptions { format?: NarrateFormat narratorModelId?: number model?: string + /** Same semantics as in NarrateCommandOptions. Default true. 
*/ + evidenceOnly?: boolean } // --------------------------------------------------------------------------- @@ -123,4 +131,9 @@ export interface NarrationResult { redactedFields: string[] llmEnabled: boolean format: NarrateFormat + /** + * Raw evidence (commit events) returned when evidenceOnly=true. + * Undefined when LLM narration was performed. + */ + evidence?: CommitEvent[] } diff --git a/src/mcp/tools/narrator.ts b/src/mcp/tools/narrator.ts index 63e60c8..4803d09 100644 --- a/src/mcp/tools/narrator.ts +++ b/src/mcp/tools/narrator.ts @@ -14,18 +14,21 @@ export function registerNarratorTools(server: McpServer) { // narrate_repo server.tool( 'narrate_repo', - 'Generate a human-readable narrative of repository development history using the configured LLM narrator model. Safe-by-default: returns a placeholder when no narrator model is configured.', + 'Return commit evidence (or an LLM narrative when evidence_only=false) for repository development history. ' + + 'By default (evidence_only=true) returns raw classified commits so the calling agent can narrate itself. ' + + 'Set evidence_only=false to invoke the configured LLM narrator model.', { since: z.string().optional().describe('Only include commits after this ref or date (e.g. "v1.0", "2024-01-01")'), until: z.string().optional().describe('Only include commits before this ref or date'), range: z.string().optional().describe('Git revision range (e.g. 
"v1.0..HEAD")'), focus: z.enum(['bugs', 'features', 'ops', 'security', 'deps', 'performance', 'all']).optional().default('all').describe('Filter commits by category'), - format: z.enum(['md', 'text', 'json']).optional().default('md').describe('Output format'), + format: z.enum(['md', 'text', 'json']).optional().default('md').describe('Output format (used when evidence_only=false)'), max_commits: z.number().int().positive().optional().describe('Maximum commits to analyse'), - narrator_model_id: z.number().int().positive().optional().describe('embed_config.id of the narrator model to use'), - model: z.string().optional().describe('Narrator model name to use (overrides active selection)'), + evidence_only: z.boolean().optional().default(true).describe('Return raw commit evidence instead of calling LLM (default: true). Set false to narrate via LLM.'), + narrator_model_id: z.number().int().positive().optional().describe('embed_config.id of the narrator model to use (only used when evidence_only=false)'), + model: z.string().optional().describe('Narrator model name to use (only used when evidence_only=false)'), }, - async ({ since, until, range, focus, format, max_commits, narrator_model_id, model }) => { + async ({ since, until, range, focus, format, max_commits, evidence_only, narrator_model_id, model }) => { const provider = resolveNarratorProvider({ narratorModelId: narrator_model_id, modelName: model, @@ -39,10 +42,19 @@ export function registerNarratorTools(server: McpServer) { focus: focus as 'bugs' | 'features' | 'ops' | 'security' | 'deps' | 'performance' | 'all', format: format as 'md' | 'text' | 'json', maxCommits: max_commits, + evidenceOnly: evidence_only, }) let text: string - if (format === 'json') { + if (result.evidence !== undefined) { + // Evidence-only mode: return structured JSON + text = JSON.stringify({ + evidenceOnly: true, + commitCount: result.commitCount, + citations: result.citations, + evidence: result.evidence, + }, null, 2) + } else if 
(format === 'json') { text = JSON.stringify({ prose: result.prose, commitCount: result.commitCount, @@ -69,16 +81,19 @@ export function registerNarratorTools(server: McpServer) { // explain_issue_or_error server.tool( 'explain_issue_or_error', - 'Explain a bug, error, or concept by tracing it through git history using the configured LLM narrator model. Returns a timeline with commit citations.', + 'Return commit evidence (or an LLM timeline when evidence_only=false) for a bug, error, or concept traced through git history. ' + + 'By default (evidence_only=true) returns matching commits so the calling agent can build its own explanation. ' + + 'Set evidence_only=false to invoke the configured LLM narrator model.', { - topic: z.string().min(1).describe('The bug, error message, or concept to explain (e.g. "NullPointerException in auth handler")'), + topic: z.string().min(1).describe('The bug, error message, or concept to trace (e.g. "NullPointerException in auth handler")'), since: z.string().optional().describe('Only include commits after this ref or date'), until: z.string().optional().describe('Only include commits before this ref or date'), - format: z.enum(['md', 'text', 'json']).optional().default('md').describe('Output format'), - narrator_model_id: z.number().int().positive().optional().describe('embed_config.id of the narrator model to use'), - model: z.string().optional().describe('Narrator model name to use (overrides active selection)'), + format: z.enum(['md', 'text', 'json']).optional().default('md').describe('Output format (used when evidence_only=false)'), + evidence_only: z.boolean().optional().default(true).describe('Return raw matching commits instead of calling LLM (default: true). 
Set false to explain via LLM.'), + narrator_model_id: z.number().int().positive().optional().describe('embed_config.id of the narrator model to use (only used when evidence_only=false)'), + model: z.string().optional().describe('Narrator model name to use (only used when evidence_only=false)'), }, - async ({ topic, since, until, format, narrator_model_id, model }) => { + async ({ topic, since, until, format, evidence_only, narrator_model_id, model }) => { const provider = resolveNarratorProvider({ narratorModelId: narrator_model_id, modelName: model, @@ -89,10 +104,20 @@ export function registerNarratorTools(server: McpServer) { since, until, format: format as 'md' | 'text' | 'json', + evidenceOnly: evidence_only, }) let text: string - if (format === 'json') { + if (result.evidence !== undefined) { + // Evidence-only mode: return structured JSON + text = JSON.stringify({ + evidenceOnly: true, + topic, + commitCount: result.commitCount, + citations: result.citations, + evidence: result.evidence, + }, null, 2) + } else if (format === 'json') { text = JSON.stringify({ prose: result.prose, commitCount: result.commitCount, diff --git a/src/server/app.ts b/src/server/app.ts index e06a7a4..73dd43e 100644 --- a/src/server/app.ts +++ b/src/server/app.ts @@ -58,6 +58,7 @@ import { watchRouter } from './routes/watch.js' import { projectionsRouter } from './routes/projections.js' import { openapiRouter } from './routes/openapi.js' import { narratorRouter } from './routes/narrator.js' +import guideRouter from './routes/guide.js' import { getActiveSession } from '../core/db/sqlite.js' import { readFileSync, existsSync } from 'node:fs' import { join, dirname } from 'node:path' @@ -169,6 +170,9 @@ export function createApp(options: AppOptions): Express { // Narrator routes (LLM-powered narrative generation) app.use(`${base}`, narratorRouter) + // Guide routes (interactive LLM chat with gitsema tool access) + app.use(`${base}/guide`, guideRouter) + // Phase 64: Capabilities 
manifest — machine-readable list of server capabilities app.get(`${base}/capabilities`, (_req, res) => { res.json({ diff --git a/src/server/routes/guide.ts b/src/server/routes/guide.ts new file mode 100644 index 0000000..3121818 --- /dev/null +++ b/src/server/routes/guide.ts @@ -0,0 +1,58 @@ +/** + * HTTP routes for the guide chat feature. + * + * POST /api/v1/guide/chat — OpenAI-compatible single-turn chat endpoint. + * Body: { question: string, model?: string, guideModelId?: number, includeContext?: boolean } + * Response: { answer, contextUsed, llmEnabled, citations } + * + * For full OpenAI /v1/chat/completions compatibility including function_calls, + * see docs/chattydeer_contract.md for the required chattydeer API extensions. + */ + +import { Router, type Request, type Response } from 'express' +import { z } from 'zod' +import { runGuide } from '../../cli/commands/guide.js' + +const router = Router() + +const GuideChatBodySchema = z.object({ + question: z.string().min(1).max(4000).describe('The question to ask the guide'), + model: z.string().optional().describe('Guide/narrator model name to use (overrides active selection)'), + guide_model_id: z.number().int().positive().optional().describe('embed_config.id of the guide model'), + include_context: z.boolean().optional().default(true).describe('Whether to gather git context before answering (default: true)'), +}) + +/** + * POST /api/v1/guide/chat + * + * Ask the gitsema guide a question about the repository. + * Uses the active guide model (falls back to narrator model). + * Returns the answer + whether LLM was enabled. 
+ */ +router.post('/chat', async (req: Request, res: Response) => { + const parsed = GuideChatBodySchema.safeParse(req.body) + if (!parsed.success) { + res.status(400).json({ error: 'Invalid request body', details: parsed.error.issues }) + return + } + + const { question, model, guide_model_id, include_context } = parsed.data + + try { + const result = await runGuide(question, { + guideModelId: guide_model_id, + model, + includeContext: include_context, + }) + + res.json({ + answer: result.answer, + contextUsed: result.contextUsed, + llmEnabled: result.llmEnabled, + }) + } catch (err) { + res.status(500).json({ error: err instanceof Error ? err.message : String(err) }) + } +}) + +export default router diff --git a/yarn.lock b/yarn.lock index 628b64d..f7f8c04 100644 --- a/yarn.lock +++ b/yarn.lock @@ -9,6 +9,13 @@ dependencies: openapi3-ts "^4.1.2" +"@emnapi/runtime@^1.7.0": + version "1.9.2" + resolved "https://registry.yarnpkg.com/@emnapi/runtime/-/runtime-1.9.2.tgz#8b469a3db160817cadb1de9050211a9d1ea84fa2" + integrity sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw== + dependencies: + tslib "^2.4.0" + "@esbuild-kit/core-utils@^3.3.2": version "3.3.2" resolved "https://registry.npmjs.org/@esbuild-kit/core-utils/-/core-utils-3.3.2.tgz" @@ -25,6 +32,241 @@ "@esbuild-kit/core-utils" "^3.3.2" get-tsconfig "^4.7.0" +"@esbuild/aix-ppc64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz#d1bc06aedb6936b3b6d313bf809a5a40387d2b7f" + integrity sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA== + +"@esbuild/aix-ppc64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.27.4.tgz#4c585002f7ad694d38fe0e8cbf5cfd939ccff327" + integrity sha512-cQPwL2mp2nSmHHJlCyoXgHGhbEPMrEEU5xhkcy3Hs/O7nGZqEpZ2sUtLaL9MORLtDfRvVl2/3PAuEkYZH0Ty8Q== + +"@esbuild/android-arm64@0.18.20": + version 
"0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.18.20.tgz#984b4f9c8d0377443cc2dfcef266d02244593622" + integrity sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ== + +"@esbuild/android-arm64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz#7ad65a36cfdb7e0d429c353e00f680d737c2aed4" + integrity sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA== + +"@esbuild/android-arm64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.27.4.tgz#7625d0952c3b402d3ede203a16c9f2b78f8a4827" + integrity sha512-gdLscB7v75wRfu7QSm/zg6Rx29VLdy9eTr2t44sfTW7CxwAtQghZ4ZnqHk3/ogz7xao0QAgrkradbBzcqFPasw== + +"@esbuild/android-arm@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.18.20.tgz#fedb265bc3a589c84cc11f810804f234947c3682" + integrity sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw== + +"@esbuild/android-arm@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.19.12.tgz#b0c26536f37776162ca8bde25e42040c203f2824" + integrity sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w== + +"@esbuild/android-arm@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.27.4.tgz#9a0cf1d12997ec46dddfb32ce67e9bca842381ac" + integrity sha512-X9bUgvxiC8CHAGKYufLIHGXPJWnr0OCdR0anD2e21vdvgCI8lIfqFbnoeOz7lBjdrAGUhqLZLcQo6MLhTO2DKQ== + +"@esbuild/android-x64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.18.20.tgz#35cf419c4cfc8babe8893d296cd990e9e9f756f2" + integrity 
sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg== + +"@esbuild/android-x64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.19.12.tgz#cb13e2211282012194d89bf3bfe7721273473b3d" + integrity sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew== + +"@esbuild/android-x64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.27.4.tgz#06e1fdc6283fccd6bc6aadd6754afce6cf96f42e" + integrity sha512-PzPFnBNVF292sfpfhiyiXCGSn9HZg5BcAz+ivBuSsl6Rk4ga1oEXAamhOXRFyMcjwr2DVtm40G65N3GLeH1Lvw== + +"@esbuild/darwin-arm64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.18.20.tgz#08172cbeccf95fbc383399a7f39cfbddaeb0d7c1" + integrity sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA== + +"@esbuild/darwin-arm64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz#cbee41e988020d4b516e9d9e44dd29200996275e" + integrity sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g== + +"@esbuild/darwin-arm64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.27.4.tgz#6c550ee6c0273bcb0fac244478ff727c26755d80" + integrity sha512-b7xaGIwdJlht8ZFCvMkpDN6uiSmnxxK56N2GDTMYPr2/gzvfdQN8rTfBsvVKmIVY/X7EM+/hJKEIbbHs9oA4tQ== + +"@esbuild/darwin-x64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.18.20.tgz#d70d5790d8bf475556b67d0f8b7c5bdff053d85d" + integrity sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ== + +"@esbuild/darwin-x64@0.19.12": + version "0.19.12" + resolved 
"https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz#e37d9633246d52aecf491ee916ece709f9d5f4cd" + integrity sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A== + +"@esbuild/darwin-x64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.27.4.tgz#ed7a125e9f25ce0091b9aff783ee943f6ba6cb86" + integrity sha512-sR+OiKLwd15nmCdqpXMnuJ9W2kpy0KigzqScqHI3Hqwr7IXxBp3Yva+yJwoqh7rE8V77tdoheRYataNKL4QrPw== + +"@esbuild/freebsd-arm64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.18.20.tgz#98755cd12707f93f210e2494d6a4b51b96977f54" + integrity sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw== + +"@esbuild/freebsd-arm64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz#1ee4d8b682ed363b08af74d1ea2b2b4dbba76487" + integrity sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA== + +"@esbuild/freebsd-arm64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.27.4.tgz#597dc8e7161dba71db4c1656131c1f1e9d7660c6" + integrity sha512-jnfpKe+p79tCnm4GVav68A7tUFeKQwQyLgESwEAUzyxk/TJr4QdGog9sqWNcUbr/bZt/O/HXouspuQDd9JxFSw== + +"@esbuild/freebsd-x64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.18.20.tgz#c1eb2bff03915f87c29cece4c1a7fa1f423b066e" + integrity sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ== + +"@esbuild/freebsd-x64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz#37a693553d42ff77cd7126764b535fb6cc28a11c" + integrity sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg== + 
+"@esbuild/freebsd-x64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.27.4.tgz#ea171f9f4f00efaa8e9d3fe8baa1b75d757d1b36" + integrity sha512-2kb4ceA/CpfUrIcTUl1wrP/9ad9Atrp5J94Lq69w7UwOMolPIGrfLSvAKJp0RTvkPPyn6CIWrNy13kyLikZRZQ== + +"@esbuild/linux-arm64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.18.20.tgz#bad4238bd8f4fc25b5a021280c770ab5fc3a02a0" + integrity sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA== + +"@esbuild/linux-arm64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz#be9b145985ec6c57470e0e051d887b09dddb2d4b" + integrity sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA== + +"@esbuild/linux-arm64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.27.4.tgz#e52d57f202369386e6dbcb3370a17a0491ab1464" + integrity sha512-7nQOttdzVGth1iz57kxg9uCz57dxQLHWxopL6mYuYthohPKEK0vU0C3O21CcBK6KDlkYVcnDXY099HcCDXd9dA== + +"@esbuild/linux-arm@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.18.20.tgz#3e617c61f33508a27150ee417543c8ab5acc73b0" + integrity sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg== + +"@esbuild/linux-arm@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz#207ecd982a8db95f7b5279207d0ff2331acf5eef" + integrity sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w== + +"@esbuild/linux-arm@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.27.4.tgz#5e0c0b634908adbce0a02cebeba8b3acac263fb6" + integrity 
sha512-aBYgcIxX/wd5n2ys0yESGeYMGF+pv6g0DhZr3G1ZG4jMfruU9Tl1i2Z+Wnj9/KjGz1lTLCcorqE2viePZqj4Eg== + +"@esbuild/linux-ia32@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.18.20.tgz#699391cccba9aee6019b7f9892eb99219f1570a7" + integrity sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA== + +"@esbuild/linux-ia32@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz#d0d86b5ca1562523dc284a6723293a52d5860601" + integrity sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA== + +"@esbuild/linux-ia32@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.27.4.tgz#5f90f01f131652473ec06b038a14c49683e14ec7" + integrity sha512-oPtixtAIzgvzYcKBQM/qZ3R+9TEUd1aNJQu0HhGyqtx6oS7qTpvjheIWBbes4+qu1bNlo2V4cbkISr8q6gRBFA== + +"@esbuild/linux-loong64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.18.20.tgz#e6fccb7aac178dd2ffb9860465ac89d7f23b977d" + integrity sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg== + +"@esbuild/linux-loong64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz#9a37f87fec4b8408e682b528391fa22afd952299" + integrity sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA== + +"@esbuild/linux-loong64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.27.4.tgz#63bacffdb99574c9318f9afbd0dd4fff76a837e3" + integrity sha512-8mL/vh8qeCoRcFH2nM8wm5uJP+ZcVYGGayMavi8GmRJjuI3g1v6Z7Ni0JJKAJW+m0EtUuARb6Lmp4hMjzCBWzA== + +"@esbuild/linux-mips64el@0.18.20": + version "0.18.20" + resolved 
"https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.18.20.tgz#eeff3a937de9c2310de30622a957ad1bd9183231" + integrity sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ== + +"@esbuild/linux-mips64el@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz#4ddebd4e6eeba20b509d8e74c8e30d8ace0b89ec" + integrity sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w== + +"@esbuild/linux-mips64el@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.27.4.tgz#c4b6952eca6a8efff67fee3671a3536c8e67b7eb" + integrity sha512-1RdrWFFiiLIW7LQq9Q2NES+HiD4NyT8Itj9AUeCl0IVCA459WnPhREKgwrpaIfTOe+/2rdntisegiPWn/r/aAw== + +"@esbuild/linux-ppc64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.18.20.tgz#2f7156bde20b01527993e6881435ad79ba9599fb" + integrity sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA== + +"@esbuild/linux-ppc64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz#adb67dadb73656849f63cd522f5ecb351dd8dee8" + integrity sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg== + +"@esbuild/linux-ppc64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.27.4.tgz#6dea67d3d98c6986f1b7769e4f1848e5ae47ad58" + integrity sha512-tLCwNG47l3sd9lpfyx9LAGEGItCUeRCWeAx6x2Jmbav65nAwoPXfewtAdtbtit/pJFLUWOhpv0FpS6GQAmPrHA== + +"@esbuild/linux-riscv64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.18.20.tgz#6628389f210123d8b4743045af8caa7d4ddfc7a6" + integrity 
sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A== + +"@esbuild/linux-riscv64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz#11bc0698bf0a2abf8727f1c7ace2112612c15adf" + integrity sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg== + +"@esbuild/linux-riscv64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.27.4.tgz#9ad2b4c3c0502c6bada9c81997bb56c597853489" + integrity sha512-BnASypppbUWyqjd1KIpU4AUBiIhVr6YlHx/cnPgqEkNoVOhHg+YiSVxM1RLfiy4t9cAulbRGTNCKOcqHrEQLIw== + +"@esbuild/linux-s390x@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.18.20.tgz#255e81fb289b101026131858ab99fba63dcf0071" + integrity sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ== + +"@esbuild/linux-s390x@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz#e86fb8ffba7c5c92ba91fc3b27ed5a70196c3cc8" + integrity sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg== + +"@esbuild/linux-s390x@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.27.4.tgz#c43d3cfd073042ca6f5c52bb9bc313ed2066ce28" + integrity sha512-+eUqgb/Z7vxVLezG8bVB9SfBie89gMueS+I0xYh2tJdw3vqA/0ImZJ2ROeWwVJN59ihBeZ7Tu92dF/5dy5FttA== + "@esbuild/linux-x64@0.18.20": version "0.18.20" resolved "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.18.20.tgz" @@ -40,6 +282,111 @@ resolved "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.27.4.tgz" integrity sha512-S5qOXrKV8BQEzJPVxAwnryi2+Iq5pB40gTEIT69BQONqR7JH1EPIcQ/Uiv9mCnn05jff9umq/5nqzxlqTOg9NA== +"@esbuild/netbsd-arm64@0.27.4": + version "0.27.4" + resolved 
"https://registry.yarnpkg.com/@esbuild/netbsd-arm64/-/netbsd-arm64-0.27.4.tgz#366b0ef40cdb986fc751cbdad16e8c25fe1ba879" + integrity sha512-xHT8X4sb0GS8qTqiwzHqpY00C95DPAq7nAwX35Ie/s+LO9830hrMd3oX0ZMKLvy7vsonee73x0lmcdOVXFzd6Q== + +"@esbuild/netbsd-x64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.18.20.tgz#30e8cd8a3dded63975e2df2438ca109601ebe0d1" + integrity sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A== + +"@esbuild/netbsd-x64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz#29da566a75324e0d0dd7e47519ba2f7ef168657b" + integrity sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA== + +"@esbuild/netbsd-x64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.27.4.tgz#e985d49a3668fd2044343071d52e1ae815112b3e" + integrity sha512-RugOvOdXfdyi5Tyv40kgQnI0byv66BFgAqjdgtAKqHoZTbTF2QqfQrFwa7cHEORJf6X2ht+l9ABLMP0dnKYsgg== + +"@esbuild/openbsd-arm64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/openbsd-arm64/-/openbsd-arm64-0.27.4.tgz#6fb4ab7b73f7e5572ce5ec9cf91c13ff6dd44842" + integrity sha512-2MyL3IAaTX+1/qP0O1SwskwcwCoOI4kV2IBX1xYnDDqthmq5ArrW94qSIKCAuRraMgPOmG0RDTA74mzYNQA9ow== + +"@esbuild/openbsd-x64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.18.20.tgz#7812af31b205055874c8082ea9cf9ab0da6217ae" + integrity sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg== + +"@esbuild/openbsd-x64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz#306c0acbdb5a99c95be98bdd1d47c916e7dc3ff0" + integrity sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw== + 
+"@esbuild/openbsd-x64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.27.4.tgz#641f052040a0d79843d68898f5791638a026d983" + integrity sha512-u8fg/jQ5aQDfsnIV6+KwLOf1CmJnfu1ShpwqdwC0uA7ZPwFws55Ngc12vBdeUdnuWoQYx/SOQLGDcdlfXhYmXQ== + +"@esbuild/openharmony-arm64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/openharmony-arm64/-/openharmony-arm64-0.27.4.tgz#fc1d33eac9d81ae0a433b3ed1dd6171a20d4e317" + integrity sha512-JkTZrl6VbyO8lDQO3yv26nNr2RM2yZzNrNHEsj9bm6dOwwu9OYN28CjzZkH57bh4w0I2F7IodpQvUAEd1mbWXg== + +"@esbuild/sunos-x64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.18.20.tgz#d5c275c3b4e73c9b0ecd38d1ca62c020f887ab9d" + integrity sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ== + +"@esbuild/sunos-x64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz#0933eaab9af8b9b2c930236f62aae3fc593faf30" + integrity sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA== + +"@esbuild/sunos-x64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.27.4.tgz#af2cd5ca842d6d057121f66a192d4f797de28f53" + integrity sha512-/gOzgaewZJfeJTlsWhvUEmUG4tWEY2Spp5M20INYRg2ZKl9QPO3QEEgPeRtLjEWSW8FilRNacPOg8R1uaYkA6g== + +"@esbuild/win32-arm64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.18.20.tgz#73bc7f5a9f8a77805f357fab97f290d0e4820ac9" + integrity sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg== + +"@esbuild/win32-arm64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz#773bdbaa1971b36db2f6560088639ccd1e6773ae" + integrity 
sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A== + +"@esbuild/win32-arm64@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.27.4.tgz#78ec7e59bb06404583d4c9511e621db31c760de3" + integrity sha512-Z9SExBg2y32smoDQdf1HRwHRt6vAHLXcxD2uGgO/v2jK7Y718Ix4ndsbNMU/+1Qiem9OiOdaqitioZwxivhXYg== + +"@esbuild/win32-ia32@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.18.20.tgz#ec93cbf0ef1085cc12e71e0d661d20569ff42102" + integrity sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g== + +"@esbuild/win32-ia32@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz#000516cad06354cc84a73f0943a4aa690ef6fd67" + integrity sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ== + +"@esbuild/win32-ia32@0.27.4": + version "0.27.4" + resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.27.4.tgz#0e616aa488b7ee5d2592ab070ff9ec06a9fddf11" + integrity sha512-DAyGLS0Jz5G5iixEbMHi5KdiApqHBWMGzTtMiJ72ZOLhbu/bzxgAe8Ue8CTS3n3HbIUHQz/L51yMdGMeoxXNJw== + +"@esbuild/win32-x64@0.18.20": + version "0.18.20" + resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.18.20.tgz#786c5f41f043b07afb1af37683d7c33668858f6d" + integrity sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ== + +"@esbuild/win32-x64@0.19.12": + version "0.19.12" + resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz#c57c8afbb4054a3ab8317591a0b7320360b444ae" + integrity sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA== + +"@esbuild/win32-x64@0.27.4": + version "0.27.4" + resolved 
"https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.27.4.tgz#1f7ba71a3d6155d44a6faa8dbe249c62ab3e408c" + integrity sha512-+knoa0BDoeXgkNvvV1vvbZX4+hizelrkwmGJBdT17t8FNPwG2lKemmuMZlmaNQ3ws3DKKCxpb4zRZEIp3UxFCg== + "@hono/node-server@^1.19.9": version "1.19.12" resolved "https://registry.npmjs.org/@hono/node-server/-/node-server-1.19.12.tgz" @@ -71,11 +418,105 @@ resolved "https://registry.npmjs.org/@img/colour/-/colour-1.1.0.tgz" integrity sha512-Td76q7j57o/tLVdgS746cYARfSyxk8iEfRxewL9h4OMzYhbW4TAcppl0mT4eyqXddh6L/jwoM75mo7ixa/pCeQ== +"@img/sharp-darwin-arm64@0.34.5": + version "0.34.5" + resolved "https://registry.yarnpkg.com/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.34.5.tgz#6e0732dcade126b6670af7aa17060b926835ea86" + integrity sha512-imtQ3WMJXbMY4fxb/Ndp6HBTNVtWCUI0WdobyheGf5+ad6xX8VIDO8u2xE4qc/fr08CKG/7dDseFtn6M6g/r3w== + optionalDependencies: + "@img/sharp-libvips-darwin-arm64" "1.2.4" + +"@img/sharp-darwin-x64@0.34.5": + version "0.34.5" + resolved "https://registry.yarnpkg.com/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.34.5.tgz#19bc1dd6eba6d5a96283498b9c9f401180ee9c7b" + integrity sha512-YNEFAF/4KQ/PeW0N+r+aVVsoIY0/qxxikF2SWdp+NRkmMB7y9LBZAVqQ4yhGCm/H3H270OSykqmQMKLBhBJDEw== + optionalDependencies: + "@img/sharp-libvips-darwin-x64" "1.2.4" + +"@img/sharp-libvips-darwin-arm64@1.2.4": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.2.4.tgz#2894c0cb87d42276c3889942e8e2db517a492c43" + integrity sha512-zqjjo7RatFfFoP0MkQ51jfuFZBnVE2pRiaydKJ1G/rHZvnsrHAOcQALIi9sA5co5xenQdTugCvtb1cuf78Vf4g== + +"@img/sharp-libvips-darwin-x64@1.2.4": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.2.4.tgz#e63681f4539a94af9cd17246ed8881734386f8cc" + integrity sha512-1IOd5xfVhlGwX+zXv2N93k0yMONvUlANylbJw1eTah8K/Jtpi15KC+WSiaX/nBmbm2HxRM1gZ0nSdjSsrZbGKg== + +"@img/sharp-libvips-linux-arm64@1.2.4": + version "1.2.4" + resolved 
"https://registry.yarnpkg.com/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.2.4.tgz#b1b288b36864b3bce545ad91fa6dadcf1a4ad318" + integrity sha512-excjX8DfsIcJ10x1Kzr4RcWe1edC9PquDRRPx3YVCvQv+U5p7Yin2s32ftzikXojb1PIFc/9Mt28/y+iRklkrw== + +"@img/sharp-libvips-linux-arm@1.2.4": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.2.4.tgz#b9260dd1ebe6f9e3bdbcbdcac9d2ac125f35852d" + integrity sha512-bFI7xcKFELdiNCVov8e44Ia4u2byA+l3XtsAj+Q8tfCwO6BQ8iDojYdvoPMqsKDkuoOo+X6HZA0s0q11ANMQ8A== + +"@img/sharp-libvips-linux-ppc64@1.2.4": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@img/sharp-libvips-linux-ppc64/-/sharp-libvips-linux-ppc64-1.2.4.tgz#4b83ecf2a829057222b38848c7b022e7b4d07aa7" + integrity sha512-FMuvGijLDYG6lW+b/UvyilUWu5Ayu+3r2d1S8notiGCIyYU/76eig1UfMmkZ7vwgOrzKzlQbFSuQfgm7GYUPpA== + +"@img/sharp-libvips-linux-riscv64@1.2.4": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@img/sharp-libvips-linux-riscv64/-/sharp-libvips-linux-riscv64-1.2.4.tgz#880b4678009e5a2080af192332b00b0aaf8a48de" + integrity sha512-oVDbcR4zUC0ce82teubSm+x6ETixtKZBh/qbREIOcI3cULzDyb18Sr/Wcyx7NRQeQzOiHTNbZFF1UwPS2scyGA== + +"@img/sharp-libvips-linux-s390x@1.2.4": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.2.4.tgz#74f343c8e10fad821b38f75ced30488939dc59ec" + integrity sha512-qmp9VrzgPgMoGZyPvrQHqk02uyjA0/QrTO26Tqk6l4ZV0MPWIW6LTkqOIov+J1yEu7MbFQaDpwdwJKhbJvuRxQ== + "@img/sharp-libvips-linux-x64@1.2.4": version "1.2.4" resolved "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.2.4.tgz" integrity sha512-tJxiiLsmHc9Ax1bz3oaOYBURTXGIRDODBqhveVHonrHJ9/+k89qbLl0bcJns+e4t4rvaNBxaEZsFtSfAdquPrw== +"@img/sharp-libvips-linuxmusl-arm64@1.2.4": + version "1.2.4" + resolved 
"https://registry.yarnpkg.com/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.2.4.tgz#c8d6b48211df67137541007ee8d1b7b1f8ca8e06" + integrity sha512-FVQHuwx1IIuNow9QAbYUzJ+En8KcVm9Lk5+uGUQJHaZmMECZmOlix9HnH7n1TRkXMS0pGxIJokIVB9SuqZGGXw== + +"@img/sharp-libvips-linuxmusl-x64@1.2.4": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.2.4.tgz#be11c75bee5b080cbee31a153a8779448f919f75" + integrity sha512-+LpyBk7L44ZIXwz/VYfglaX/okxezESc6UxDSoyo2Ks6Jxc4Y7sGjpgU9s4PMgqgjj1gZCylTieNamqA1MF7Dg== + +"@img/sharp-linux-arm64@0.34.5": + version "0.34.5" + resolved "https://registry.yarnpkg.com/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.34.5.tgz#7aa7764ef9c001f15e610546d42fce56911790cc" + integrity sha512-bKQzaJRY/bkPOXyKx5EVup7qkaojECG6NLYswgktOZjaXecSAeCWiZwwiFf3/Y+O1HrauiE3FVsGxFg8c24rZg== + optionalDependencies: + "@img/sharp-libvips-linux-arm64" "1.2.4" + +"@img/sharp-linux-arm@0.34.5": + version "0.34.5" + resolved "https://registry.yarnpkg.com/@img/sharp-linux-arm/-/sharp-linux-arm-0.34.5.tgz#5fb0c3695dd12522d39c3ff7a6bc816461780a0d" + integrity sha512-9dLqsvwtg1uuXBGZKsxem9595+ujv0sJ6Vi8wcTANSFpwV/GONat5eCkzQo/1O6zRIkh0m/8+5BjrRr7jDUSZw== + optionalDependencies: + "@img/sharp-libvips-linux-arm" "1.2.4" + +"@img/sharp-linux-ppc64@0.34.5": + version "0.34.5" + resolved "https://registry.yarnpkg.com/@img/sharp-linux-ppc64/-/sharp-linux-ppc64-0.34.5.tgz#9c213a81520a20caf66978f3d4c07456ff2e0813" + integrity sha512-7zznwNaqW6YtsfrGGDA6BRkISKAAE1Jo0QdpNYXNMHu2+0dTrPflTLNkpc8l7MUP5M16ZJcUvysVWWrMefZquA== + optionalDependencies: + "@img/sharp-libvips-linux-ppc64" "1.2.4" + +"@img/sharp-linux-riscv64@0.34.5": + version "0.34.5" + resolved "https://registry.yarnpkg.com/@img/sharp-linux-riscv64/-/sharp-linux-riscv64-0.34.5.tgz#cdd28182774eadbe04f62675a16aabbccb833f60" + integrity sha512-51gJuLPTKa7piYPaVs8GmByo7/U7/7TZOq+cnXJIHZKavIRHAP77e3N2HEl3dgiqdD/w0yUfiJnII77PuDDFdw== + 
optionalDependencies: + "@img/sharp-libvips-linux-riscv64" "1.2.4" + +"@img/sharp-linux-s390x@0.34.5": + version "0.34.5" + resolved "https://registry.yarnpkg.com/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.34.5.tgz#93eac601b9f329bb27917e0e19098c722d630df7" + integrity sha512-nQtCk0PdKfho3eC5MrbQoigJ2gd1CgddUMkabUj+rBevs8tZ2cULOx46E7oyX+04WGfABgIwmMC0VqieTiR4jg== + optionalDependencies: + "@img/sharp-libvips-linux-s390x" "1.2.4" + "@img/sharp-linux-x64@0.34.5": version "0.34.5" resolved "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.34.5.tgz" @@ -83,6 +524,42 @@ optionalDependencies: "@img/sharp-libvips-linux-x64" "1.2.4" +"@img/sharp-linuxmusl-arm64@0.34.5": + version "0.34.5" + resolved "https://registry.yarnpkg.com/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.34.5.tgz#d6515ee971bb62f73001a4829b9d865a11b77086" + integrity sha512-fprJR6GtRsMt6Kyfq44IsChVZeGN97gTD331weR1ex1c1rypDEABN6Tm2xa1wE6lYb5DdEnk03NZPqA7Id21yg== + optionalDependencies: + "@img/sharp-libvips-linuxmusl-arm64" "1.2.4" + +"@img/sharp-linuxmusl-x64@0.34.5": + version "0.34.5" + resolved "https://registry.yarnpkg.com/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.34.5.tgz#d97978aec7c5212f999714f2f5b736457e12ee9f" + integrity sha512-Jg8wNT1MUzIvhBFxViqrEhWDGzqymo3sV7z7ZsaWbZNDLXRJZoRGrjulp60YYtV4wfY8VIKcWidjojlLcWrd8Q== + optionalDependencies: + "@img/sharp-libvips-linuxmusl-x64" "1.2.4" + +"@img/sharp-wasm32@0.34.5": + version "0.34.5" + resolved "https://registry.yarnpkg.com/@img/sharp-wasm32/-/sharp-wasm32-0.34.5.tgz#2f15803aa626f8c59dd7c9d0bbc766f1ab52cfa0" + integrity sha512-OdWTEiVkY2PHwqkbBI8frFxQQFekHaSSkUIJkwzclWZe64O1X4UlUjqqqLaPbUpMOQk6FBu/HtlGXNblIs0huw== + dependencies: + "@emnapi/runtime" "^1.7.0" + +"@img/sharp-win32-arm64@0.34.5": + version "0.34.5" + resolved "https://registry.yarnpkg.com/@img/sharp-win32-arm64/-/sharp-win32-arm64-0.34.5.tgz#3706e9e3ac35fddfc1c87f94e849f1b75307ce0a" + integrity 
sha512-WQ3AgWCWYSb2yt+IG8mnC6Jdk9Whs7O0gxphblsLvdhSpSTtmu69ZG1Gkb6NuvxsNACwiPV6cNSZNzt0KPsw7g== + +"@img/sharp-win32-ia32@0.34.5": + version "0.34.5" + resolved "https://registry.yarnpkg.com/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.34.5.tgz#0b71166599b049e032f085fb9263e02f4e4788de" + integrity sha512-FV9m/7NmeCmSHDD5j4+4pNI8Cp3aW+JvLoXcTUo0IqyjSfAZJ8dIUmijx1qaJsIiU+Hosw6xM5KijAWRJCSgNg== + +"@img/sharp-win32-x64@0.34.5": + version "0.34.5" + resolved "https://registry.yarnpkg.com/@img/sharp-win32-x64/-/sharp-win32-x64-0.34.5.tgz#a81ffb00e69267cd0a1d626eaedb8a8430b2b2f8" + integrity sha512-+29YMsqY2/9eFEiW93eqWnuLcWcufowXewwSNIT6UwZdUUCrM3oFjMWH/Z6/TMmb4hlFenmfAVbpWeup2jryCw== + "@jridgewell/sourcemap-codec@^1.5.5": version "1.5.5" resolved "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz" @@ -96,13 +573,6 @@ "@huggingface/transformers" "^4.0.1" "@jsilvanus/embedeer" "1.3.1" -"@jsilvanus/embedeer@^1.3.2": - version "1.3.2" - resolved "https://registry.npmjs.org/@jsilvanus/embedeer/-/embedeer-1.3.2.tgz" - integrity sha512-7MGHSzxkLPGtwEylAgKok2DEsxvSZC6PrBLCoPfsDEAi4tpGUoSQNnfTfzqSjvStuiNLrIsvsCvfZpoEllJ83Q== - dependencies: - "@huggingface/transformers" "^4.0.1" - "@jsilvanus/embedeer@1.3.1": version "1.3.1" resolved "https://registry.npmjs.org/@jsilvanus/embedeer/-/embedeer-1.3.1.tgz" @@ -110,6 +580,13 @@ dependencies: "@huggingface/transformers" "^4.0.1" +"@jsilvanus/embedeer@^1.3.2": + version "1.3.2" + resolved "https://registry.npmjs.org/@jsilvanus/embedeer/-/embedeer-1.3.2.tgz" + integrity sha512-7MGHSzxkLPGtwEylAgKok2DEsxvSZC6PrBLCoPfsDEAi4tpGUoSQNnfTfzqSjvStuiNLrIsvsCvfZpoEllJ83Q== + dependencies: + "@huggingface/transformers" "^4.0.1" + "@modelcontextprotocol/sdk@^1.29.0": version "1.29.0" resolved "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.29.0.tgz" @@ -133,6 +610,13 @@ zod "^3.25 || ^4.0" zod-to-json-schema "^3.25.1" +"@napi-rs/wasm-runtime@^1.1.1": + version "1.1.3" + resolved 
"https://registry.yarnpkg.com/@napi-rs/wasm-runtime/-/wasm-runtime-1.1.3.tgz#1eeb8699770481306e5fcd84471f20fcb6177336" + integrity sha512-xK9sGVbJWYb08+mTJt3/YV24WxvxpXcXtP6B172paPZ+Ts69Re9dAr7lKwJoeIx8OoeuimEiRZ7umkiUVClmmQ== + dependencies: + "@tybys/wasm-util" "^0.10.1" + "@noble/hashes@^1.1.5": version "1.8.0" resolved "https://registry.npmjs.org/@noble/hashes/-/hashes-1.8.0.tgz" @@ -208,6 +692,51 @@ resolved "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz" integrity sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw== +"@rolldown/binding-android-arm64@1.0.0-rc.12": + version "1.0.0-rc.12" + resolved "https://registry.yarnpkg.com/@rolldown/binding-android-arm64/-/binding-android-arm64-1.0.0-rc.12.tgz#4e6af08b89da02596cc5da4b105082b68673ffec" + integrity sha512-pv1y2Fv0JybcykuiiD3qBOBdz6RteYojRFY1d+b95WVuzx211CRh+ytI/+9iVyWQ6koTh5dawe4S/yRfOFjgaA== + +"@rolldown/binding-darwin-arm64@1.0.0-rc.12": + version "1.0.0-rc.12" + resolved "https://registry.yarnpkg.com/@rolldown/binding-darwin-arm64/-/binding-darwin-arm64-1.0.0-rc.12.tgz#a06890f4c9b48ff0fc97edbedfc762bef7cffd73" + integrity sha512-cFYr6zTG/3PXXF3pUO+umXxt1wkRK/0AYT8lDwuqvRC+LuKYWSAQAQZjCWDQpAH172ZV6ieYrNnFzVVcnSflAg== + +"@rolldown/binding-darwin-x64@1.0.0-rc.12": + version "1.0.0-rc.12" + resolved "https://registry.yarnpkg.com/@rolldown/binding-darwin-x64/-/binding-darwin-x64-1.0.0-rc.12.tgz#eddf6aa3ed3509171fe21711f1e8ec8e0fd7ec49" + integrity sha512-ZCsYknnHzeXYps0lGBz8JrF37GpE9bFVefrlmDrAQhOEi4IOIlcoU1+FwHEtyXGx2VkYAvhu7dyBf75EJQffBw== + +"@rolldown/binding-freebsd-x64@1.0.0-rc.12": + version "1.0.0-rc.12" + resolved "https://registry.yarnpkg.com/@rolldown/binding-freebsd-x64/-/binding-freebsd-x64-1.0.0-rc.12.tgz#2102dfed19fd1f1b53435fcaaf0bc61129a266a3" + integrity sha512-dMLeprcVsyJsKolRXyoTH3NL6qtsT0Y2xeuEA8WQJquWFXkEC4bcu1rLZZSnZRMtAqwtrF/Ib9Ddtpa/Gkge9Q== + +"@rolldown/binding-linux-arm-gnueabihf@1.0.0-rc.12": + version "1.0.0-rc.12" + 
resolved "https://registry.yarnpkg.com/@rolldown/binding-linux-arm-gnueabihf/-/binding-linux-arm-gnueabihf-1.0.0-rc.12.tgz#b2c13f40e990fd1e1935492850536c768c961a0f" + integrity sha512-YqWjAgGC/9M1lz3GR1r1rP79nMgo3mQiiA+Hfo+pvKFK1fAJ1bCi0ZQVh8noOqNacuY1qIcfyVfP6HoyBRZ85Q== + +"@rolldown/binding-linux-arm64-gnu@1.0.0-rc.12": + version "1.0.0-rc.12" + resolved "https://registry.yarnpkg.com/@rolldown/binding-linux-arm64-gnu/-/binding-linux-arm64-gnu-1.0.0-rc.12.tgz#32ca9f77c1e76b2913b3d53d2029dc171c0532d6" + integrity sha512-/I5AS4cIroLpslsmzXfwbe5OmWvSsrFuEw3mwvbQ1kDxJ822hFHIx+vsN/TAzNVyepI/j/GSzrtCIwQPeKCLIg== + +"@rolldown/binding-linux-arm64-musl@1.0.0-rc.12": + version "1.0.0-rc.12" + resolved "https://registry.yarnpkg.com/@rolldown/binding-linux-arm64-musl/-/binding-linux-arm64-musl-1.0.0-rc.12.tgz#f4337ddd52f0ed3ada2105b59ee1b757a2c4858c" + integrity sha512-V6/wZztnBqlx5hJQqNWwFdxIKN0m38p8Jas+VoSfgH54HSj9tKTt1dZvG6JRHcjh6D7TvrJPWFGaY9UBVOaWPw== + +"@rolldown/binding-linux-ppc64-gnu@1.0.0-rc.12": + version "1.0.0-rc.12" + resolved "https://registry.yarnpkg.com/@rolldown/binding-linux-ppc64-gnu/-/binding-linux-ppc64-gnu-1.0.0-rc.12.tgz#22fdd14cb00ee8208c28a39bab7f28860ec6705d" + integrity sha512-AP3E9BpcUYliZCxa3w5Kwj9OtEVDYK6sVoUzy4vTOJsjPOgdaJZKFmN4oOlX0Wp0RPV2ETfmIra9x1xuayFB7g== + +"@rolldown/binding-linux-s390x-gnu@1.0.0-rc.12": + version "1.0.0-rc.12" + resolved "https://registry.yarnpkg.com/@rolldown/binding-linux-s390x-gnu/-/binding-linux-s390x-gnu-1.0.0-rc.12.tgz#838215096d1de6d3d509e0410801cb7cda8161ff" + integrity sha512-nWwpvUSPkoFmZo0kQazZYOrT7J5DGOJ/+QHHzjvNlooDZED8oH82Yg67HvehPPLAg5fUff7TfWFHQS8IV1n3og== + "@rolldown/binding-linux-x64-gnu@1.0.0-rc.12": version "1.0.0-rc.12" resolved "https://registry.npmjs.org/@rolldown/binding-linux-x64-gnu/-/binding-linux-x64-gnu-1.0.0-rc.12.tgz" @@ -218,6 +747,28 @@ resolved "https://registry.npmjs.org/@rolldown/binding-linux-x64-musl/-/binding-linux-x64-musl-1.0.0-rc.12.tgz" integrity 
sha512-Jpw/0iwoKWx3LJ2rc1yjFrj+T7iHZn2JDg1Yny1ma0luviFS4mhAIcd1LFNxK3EYu3DHWCps0ydXQ5i/rrJ2ig== +"@rolldown/binding-openharmony-arm64@1.0.0-rc.12": + version "1.0.0-rc.12" + resolved "https://registry.yarnpkg.com/@rolldown/binding-openharmony-arm64/-/binding-openharmony-arm64-1.0.0-rc.12.tgz#f66317e29eafcc300bed7af8dddac26ab3b1bf82" + integrity sha512-vRugONE4yMfVn0+7lUKdKvN4D5YusEiPilaoO2sgUWpCvrncvWgPMzK00ZFFJuiPgLwgFNP5eSiUlv2tfc+lpA== + +"@rolldown/binding-wasm32-wasi@1.0.0-rc.12": + version "1.0.0-rc.12" + resolved "https://registry.yarnpkg.com/@rolldown/binding-wasm32-wasi/-/binding-wasm32-wasi-1.0.0-rc.12.tgz#8825523fdffa1f1dc4683be9650ffaa9e4a77f04" + integrity sha512-ykGiLr/6kkiHc0XnBfmFJuCjr5ZYKKofkx+chJWDjitX+KsJuAmrzWhwyOMSHzPhzOHOy7u9HlFoa5MoAOJ/Zg== + dependencies: + "@napi-rs/wasm-runtime" "^1.1.1" + +"@rolldown/binding-win32-arm64-msvc@1.0.0-rc.12": + version "1.0.0-rc.12" + resolved "https://registry.yarnpkg.com/@rolldown/binding-win32-arm64-msvc/-/binding-win32-arm64-msvc-1.0.0-rc.12.tgz#4f3a17e3d68a58309c27c0930b0f7986ccabef47" + integrity sha512-5eOND4duWkwx1AzCxadcOrNeighiLwMInEADT0YM7xeEOOFcovWZCq8dadXgcRHSf3Ulh1kFo/qvzoFiCLOL1Q== + +"@rolldown/binding-win32-x64-msvc@1.0.0-rc.12": + version "1.0.0-rc.12" + resolved "https://registry.yarnpkg.com/@rolldown/binding-win32-x64-msvc/-/binding-win32-x64-msvc-1.0.0-rc.12.tgz#d762765d5660598a96b570b513f535c151272985" + integrity sha512-PyqoipaswDLAZtot351MLhrlrh6lcZPo2LSYE+VDxbVk24LVKAGOuE4hb8xZQmrPAuEtTZW8E6D2zc5EUZX4Lw== + "@rolldown/pluginutils@1.0.0-rc.12": version "1.0.0-rc.12" resolved "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-rc.12.tgz" @@ -228,6 +779,13 @@ resolved "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz" integrity sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w== +"@tybys/wasm-util@^0.10.1": + version "0.10.1" + resolved 
"https://registry.yarnpkg.com/@tybys/wasm-util/-/wasm-util-0.10.1.tgz#ecddd3205cf1e2d5274649ff0eedd2991ed7f414" + integrity sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg== + dependencies: + tslib "^2.4.0" + "@types/better-sqlite3@^7.6.13": version "7.6.13" resolved "https://registry.npmjs.org/@types/better-sqlite3/-/better-sqlite3-7.6.13.tgz" @@ -302,7 +860,7 @@ resolved "https://registry.npmjs.org/@types/methods/-/methods-1.1.4.tgz" integrity sha512-ymXWVrDiCxTBE3+RIrrP533E70eA+9qu7zdWoHuOmGujkYtzf4HQF96b8nwHLqhuf4ykX61IGRIB38CC6/sImQ== -"@types/node@*", "@types/node@^20.14.0", "@types/node@>=13.7.0": +"@types/node@*", "@types/node@>=13.7.0", "@types/node@^20.14.0": version "20.19.37" resolved "https://registry.npmjs.org/@types/node/-/node-20.19.37.tgz" integrity sha512-8kzdPJ3FsNsVIurqBs7oodNnCEVbni9yUEkaHbgptDACOPW04jimGagZ51E6+lXUwJjgnBw+hyko/lkFWCldqw== @@ -975,7 +1533,7 @@ expect-type@^1.3.0: resolved "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz" integrity sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA== -express-rate-limit@^8.2.1, express-rate-limit@8.3.2: +express-rate-limit@8.3.2, express-rate-limit@^8.2.1: version "8.3.2" resolved "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-8.3.2.tgz" integrity sha512-77VmFeJkO0/rvimEDuUC5H30oqUC4EyOhyGccfqoLebB0oiEYfM7nwPrsDsBL1gsTpwfzX8SFy2MT3TDyRq+bg== @@ -1098,6 +1656,11 @@ fs-constants@^1.0.0: resolved "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz" integrity sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow== +fsevents@~2.3.3: + version "2.3.3" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6" + integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw== + function-bind@^1.1.2: version "1.1.2" resolved 
"https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz" @@ -1273,6 +1836,41 @@ json-stringify-safe@^5.0.1: resolved "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz" integrity sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA== +lightningcss-android-arm64@1.32.0: + version "1.32.0" + resolved "https://registry.yarnpkg.com/lightningcss-android-arm64/-/lightningcss-android-arm64-1.32.0.tgz#f033885116dfefd9c6f54787523e3514b61e1968" + integrity sha512-YK7/ClTt4kAK0vo6w3X+Pnm0D2cf2vPHbhOXdoNti1Ga0al1P4TBZhwjATvjNwLEBCnKvjJc2jQgHXH0NEwlAg== + +lightningcss-darwin-arm64@1.32.0: + version "1.32.0" + resolved "https://registry.yarnpkg.com/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.32.0.tgz#50b71871b01c8199584b649e292547faea7af9b5" + integrity sha512-RzeG9Ju5bag2Bv1/lwlVJvBE3q6TtXskdZLLCyfg5pt+HLz9BqlICO7LZM7VHNTTn/5PRhHFBSjk5lc4cmscPQ== + +lightningcss-darwin-x64@1.32.0: + version "1.32.0" + resolved "https://registry.yarnpkg.com/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.32.0.tgz#35f3e97332d130b9ca181e11b568ded6aebc6d5e" + integrity sha512-U+QsBp2m/s2wqpUYT/6wnlagdZbtZdndSmut/NJqlCcMLTWp5muCrID+K5UJ6jqD2BFshejCYXniPDbNh73V8w== + +lightningcss-freebsd-x64@1.32.0: + version "1.32.0" + resolved "https://registry.yarnpkg.com/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.32.0.tgz#9777a76472b64ed6ff94342ad64c7bafd794a575" + integrity sha512-JCTigedEksZk3tHTTthnMdVfGf61Fky8Ji2E4YjUTEQX14xiy/lTzXnu1vwiZe3bYe0q+SpsSH/CTeDXK6WHig== + +lightningcss-linux-arm-gnueabihf@1.32.0: + version "1.32.0" + resolved "https://registry.yarnpkg.com/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.32.0.tgz#13ae652e1ab73b9135d7b7da172f666c410ad53d" + integrity sha512-x6rnnpRa2GL0zQOkt6rts3YDPzduLpWvwAF6EMhXFVZXD4tPrBkEFqzGowzCsIWsPjqSK+tyNEODUBXeeVHSkw== + +lightningcss-linux-arm64-gnu@1.32.0: + version "1.32.0" + resolved 
"https://registry.yarnpkg.com/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.32.0.tgz#417858795a94592f680123a1b1f9da8a0e1ef335" + integrity sha512-0nnMyoyOLRJXfbMOilaSRcLH3Jw5z9HDNGfT/gwCPgaDjnx0i8w7vBzFLFR1f6CMLKF8gVbebmkUN3fa/kQJpQ== + +lightningcss-linux-arm64-musl@1.32.0: + version "1.32.0" + resolved "https://registry.yarnpkg.com/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.32.0.tgz#6be36692e810b718040802fd809623cffe732133" + integrity sha512-UpQkoenr4UJEzgVIYpI80lDFvRmPVg6oqboNHfoH4CQIfNA+HOrZ7Mo7KZP02dC6LjghPQJeBsvXhJod/wnIBg== + lightningcss-linux-x64-gnu@1.32.0: version "1.32.0" resolved "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.32.0.tgz" @@ -1283,6 +1881,16 @@ lightningcss-linux-x64-musl@1.32.0: resolved "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.32.0.tgz" integrity sha512-bYcLp+Vb0awsiXg/80uCRezCYHNg1/l3mt0gzHnWV9XP1W5sKa5/TCdGWaR/zBM2PeF/HbsQv/j2URNOiVuxWg== +lightningcss-win32-arm64-msvc@1.32.0: + version "1.32.0" + resolved "https://registry.yarnpkg.com/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.32.0.tgz#4f30ba3fa5e925f5b79f945e8cc0d176c3b1ab38" + integrity sha512-8SbC8BR40pS6baCM8sbtYDSwEVQd4JlFTOlaD3gWGHfThTcABnNDBda6eTZeqbofalIJhFx0qKzgHJmcPTnGdw== + +lightningcss-win32-x64-msvc@1.32.0: + version "1.32.0" + resolved "https://registry.yarnpkg.com/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.32.0.tgz#141aa5605645064928902bb4af045fa7d9f4220a" + integrity sha512-Amq9B/SoZYdDi1kFrojnoqPLxYhQ4Wo5XiL8EVJrVsB8ARoC1PWW6VGtT0WKCemjy8aC+louJnjS7U18x3b06Q== + lightningcss@^1.32.0: version "1.32.0" resolved "https://registry.npmjs.org/lightningcss/-/lightningcss-1.32.0.tgz" @@ -1341,16 +1949,16 @@ methods@^1.1.2: resolved "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz" integrity sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w== -mime-db@^1.54.0: - 
version "1.54.0" - resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz" - integrity sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ== - mime-db@1.52.0: version "1.52.0" resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz" integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== +mime-db@^1.54.0: + version "1.54.0" + resolved "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz" + integrity sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ== + mime-types@^2.1.12: version "2.1.35" resolved "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz" @@ -1419,9 +2027,9 @@ node-abi@^3.3.0: dependencies: semver "^7.3.5" -node-addon-api@^8.2.2, node-addon-api@^8.3.1, node-addon-api@^8.5.0: +node-addon-api@^8.2.2, node-addon-api@^8.3.0, node-addon-api@^8.3.1, node-addon-api@^8.5.0: version "8.7.0" - resolved "https://registry.npmjs.org/node-addon-api/-/node-addon-api-8.7.0.tgz" + resolved "https://registry.yarnpkg.com/node-addon-api/-/node-addon-api-8.7.0.tgz#f64f8413456ecbe900221305a3f883c37666473f" integrity sha512-9MdFxmkKaOYVTV+XVRG8ArDwwQ77XIgIPyKASB1k3JPq3M8fGQQQE3YpMOrKm6g//Ktx8ivZr8xo1Qmtqub+GA== node-gyp-build@^4.8.2, node-gyp-build@^4.8.4: @@ -2091,6 +2699,19 @@ tree-sitter-typescript@^0.23.2: node-gyp-build "^4.8.2" tree-sitter-javascript "^0.23.1" +tree-sitter@^0.25.0: + version "0.25.0" + resolved "https://registry.yarnpkg.com/tree-sitter/-/tree-sitter-0.25.0.tgz#d9d94ba00b501df49826c10c0f74037b890788eb" + integrity sha512-PGZZzFW63eElZJDe/b/R/LbsjDDYJa5UEjLZJB59RQsMX+fo0j54fqBPn1MGKav/QNa0JR0zBiVaikYDWCj5KQ== + dependencies: + node-addon-api "^8.3.0" + node-gyp-build "^4.8.4" + +tslib@^2.4.0: + version "2.8.1" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.8.1.tgz#612efe4ed235d567e8aba5f2a5fab70280ade83f" + integrity 
sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w== + tsx@^4.15.7: version "4.21.0" resolved "https://registry.npmjs.org/tsx/-/tsx-4.21.0.tgz"