diff --git a/.claude-plugin/plugin.json b/.claude-plugin/plugin.json
index 455e044..6c307a2 100644
--- a/.claude-plugin/plugin.json
+++ b/.claude-plugin/plugin.json
@@ -1,6 +1,6 @@
{
- "name": "vercel",
- "version": "0.32.7",
+ "name": "vercel-plugin",
+ "version": "0.40.0",
"description": "Build and deploy web apps and agents",
"author": {
"name": "Vercel",
diff --git a/.cursor-plugin/plugin.json b/.cursor-plugin/plugin.json
index f804edb..d33627c 100644
--- a/.cursor-plugin/plugin.json
+++ b/.cursor-plugin/plugin.json
@@ -1,6 +1,6 @@
{
"name": "vercel",
- "version": "0.32.5",
+ "version": "0.40.0",
"description": "Build and deploy web apps and agents",
"author": {
"name": "Vercel",
diff --git a/.plugin/plugin.json b/.plugin/plugin.json
index feecf46..19330e4 100644
--- a/.plugin/plugin.json
+++ b/.plugin/plugin.json
@@ -1,6 +1,6 @@
{
"name": "vercel-plugin",
- "version": "0.32.7",
+ "version": "0.40.0",
"description": "Comprehensive Vercel ecosystem plugin — relational knowledge graph, skills for every major product, specialized agents, and Vercel conventions. Turns any AI agent into a Vercel expert.",
"author": {
"name": "Vercel",
diff --git a/CLAUDE.md b/CLAUDE.md
index e7f76cb..322c574 100644
--- a/CLAUDE.md
+++ b/CLAUDE.md
@@ -30,10 +30,6 @@ All hooks are registered in `hooks/hooks.json` and run via `node "${CLAUDE_PLUGI
| SessionStart | `session-start-seen-skills.mjs` | `startup\|resume\|clear\|compact` | — |
| SessionStart | `session-start-profiler.mjs` | `startup\|resume\|clear\|compact` | — |
| SessionStart | `inject-claude-md.mjs` | `startup\|resume\|clear\|compact` | — |
-| PreToolUse | `pretooluse-skill-inject.mjs` | `Read\|Edit\|Write\|Bash` | 5s |
-| UserPromptSubmit | `user-prompt-submit-skill-inject.mjs` | (all prompts) | 5s |
-| PostToolUse | `posttooluse-shadcn-font-fix.mjs` | `Bash` | 5s |
-| PostToolUse | `posttooluse-validate.mjs` | `Write\|Edit` | 5s |
| SessionEnd | `session-end-cleanup.mjs` | — | — |
### Hook Source Files (`hooks/src/*.mts`)
@@ -42,13 +38,9 @@ Source lives in `hooks/src/*.mts` (TypeScript) and compiles to `hooks/*.mjs` (ES
**Entry-point hooks** (wired in hooks.json):
- `session-start-seen-skills.mts` — initializes `VERCEL_PLUGIN_SEEN_SKILLS=""` in `CLAUDE_ENV_FILE`
-- `session-start-profiler.mts` — scans config files + package deps → sets `VERCEL_PLUGIN_LIKELY_SKILLS` (+5 priority boost); detects greenfield mode
-- `inject-claude-md.mts` — outputs `vercel.md` ecosystem graph (52KB) as SessionStart additionalContext
-- `pretooluse-skill-inject.mts` — main injection engine: pattern matching → ranking → dedup → budget enforcement (max 3 skills, 18KB)
-- `user-prompt-submit-skill-inject.mts` — prompt signal scoring engine (max 2 skills, 8KB budget)
-- `posttooluse-validate.mts` — runs skill-defined validation rules on written/edited files
-- `posttooluse-shadcn-font-fix.mjs` — fixes shadcn font loading issues (standalone, no .mts source)
-- `session-end-cleanup.mts` — deletes temp dedup + validation files
+- `session-start-profiler.mts` — activates only for greenfield directories or detected Vercel/Next.js projects, then scans config files + package deps → sets `VERCEL_PLUGIN_LIKELY_SKILLS` (+5 priority boost)
+- `inject-claude-md.mts` — outputs the thin session-start Vercel context plus knowledge-update guidance for that same activation set
+- `session-end-cleanup.mts` — deletes session-scoped temp files
**Library modules** (imported by entry-point hooks):
- `hook-env.mts` — shared runtime helpers (env parsing, path resolution)
@@ -61,11 +53,11 @@ Source lives in `hooks/src/*.mts` (TypeScript) and compiles to `hooks/*.mjs` (ES
### Skill Injection Flow
-1. **SessionStart**: Profiler scans project → sets `VERCEL_PLUGIN_LIKELY_SKILLS`
+1. **SessionStart**: For greenfield directories or detected Vercel/Next.js projects, the profiler scans the project → sets `VERCEL_PLUGIN_LIKELY_SKILLS`
2. **PreToolUse** (on Read/Edit/Write/Bash): Match file paths (glob), bash commands (regex), imports (regex+flags) → apply vercel.json routing → apply profiler boost → rank by priority → dedup → inject up to 3 skills within 18KB budget
3. **UserPromptSubmit**: Score prompt text against `promptSignals` (phrases/allOf/anyOf/noneOf) → inject up to 2 skills within 8KB budget
- **3b. Lexical fallback** (when `VERCEL_PLUGIN_LEXICAL_PROMPT=on`): If phrase/allOf/anyOf scoring yields no matches above `minScore`, re-score using a lexical stemmer that normalizes prompt tokens before comparison — catches natural phrasing that exact-substring matching misses
-4. **PostToolUse** (on Write/Edit): Match written file to skills → run `validate` rules → return fix instructions on error
+4. **SessionEnd**: Clean up session-scoped temp files
Special triggers in PreToolUse:
- **TSX review**: After N `.tsx` edits (default 3), injects `react-best-practices`
@@ -154,7 +146,7 @@ Heading extraction is case-insensitive and captures everything from the heading
32 test files across `tests/`. Key categories:
-- **Hook integration**: `pretooluse-skill-inject`, `user-prompt-submit`, `posttooluse-validate`, `session-start-profiler`, `session-start-seen-skills`
+- **Hook integration**: `session-start-profiler`, `session-start-seen-skills`
- **Pattern matching**: `patterns`, `fuzz-glob`, `fuzz-yaml`, `prompt-signals`, `prompt-analysis`
- **Snapshots**: `snapshot-runner` (golden snapshots of skill injection metadata per vercel.json fixture), `snapshots` (snapshot assertions)
- **Validation**: `validate`, `validate-rules`, `build-skill-map`
diff --git a/README.md b/README.md
index a518c87..ae824c9 100644
--- a/README.md
+++ b/README.md
@@ -22,15 +22,15 @@
npx plugins add vercel/vercel-plugin
```
-That's it. The plugin activates automatically — no setup, no commands to learn. Just build.
+That's it. The plugin installs Vercel context, skills, and a lightweight default hook profile.
## What It Does
-This plugin pre-loads AI agents with a **relational knowledge graph** of the entire Vercel ecosystem — every product, library, CLI, API, and service — showing how they relate, when to use each, and providing deep guidance through bundled skills.
+This plugin gives AI agents a **relational knowledge graph** of the Vercel ecosystem plus a bundled skill library covering products, libraries, CLI, APIs, and workflows.
## How Do I Use This?
-After installing, there's nothing to learn — all Vercel guidance happens automatically. The plugin detects what you're working on from your tool calls, file paths, and project config, then injects the right expertise at the right time. Just use your AI agent as you normally would and the plugin handles the rest.
+After installing, automatic behavior stays lightweight. Session-start activation now kicks in only for empty directories and detected Vercel/Next.js projects, and Vercel skills are no longer auto-injected on every tool call or every prompt by default. The default post-tool path is now observer-only. The skills remain available for direct use, and the repository still ships the injection engine for targeted or future opt-in workflows.
## Components
@@ -94,14 +94,12 @@ A text-form relational graph covering:
Lifecycle hooks that run automatically during your session:
-- **Session start context injection** — Injects `vercel.md` (ecosystem graph + conventions) into every session
-- **Session start repo profiler** — Scans config files and dependencies to pre-prime skill matching for faster first tool call
-- **Pre-tool-use skill injection** — Matches tool calls to skills and injects relevant guidance with dedup
-- **Pre-write/edit validation** — Catches deprecated patterns before they're written (sunset packages, old API names, renamed files)
+- **Session start context injection** — Injects a thin Vercel session context plus the knowledge-update guidance for empty directories and detected Vercel/Next.js projects
+- **Session start repo profiler** — Scans config files and dependencies to set likely-skill hints, but only after that same activation check passes
## Usage
-After installing, skills and context are injected automatically. You can also invoke skills directly via slash commands:
+After installing, session context is injected automatically only for empty directories and detected Vercel/Next.js projects. Vercel skills are available on demand, and you can invoke them directly via slash commands:
```
/vercel-plugin:nextjs
@@ -111,18 +109,14 @@ After installing, skills and context are injected automatically. You can also in
## Telemetry
-The plugin has two separate telemetry controls:
-
-- `~/.claude/vercel-plugin-telemetry-preference` controls prompt text only.
-- `VERCEL_PLUGIN_TELEMETRY=off` disables all telemetry.
+Prompt text and bash/tool-call telemetry are not collected.
Behavior:
-- `echo 'enabled' > ~/.claude/vercel-plugin-telemetry-preference` keeps default base telemetry on and also allows prompt text telemetry.
-- `echo 'disabled' > ~/.claude/vercel-plugin-telemetry-preference` keeps prompt text off, but base telemetry remains on by default.
-- `VERCEL_PLUGIN_TELEMETRY=off` disables all telemetry, including prompt text, session metadata, tool names, and skill-injection telemetry.
+- Unset `VERCEL_PLUGIN_TELEMETRY`: default DAU-only telemetry. Sends a once-per-day `dau:active_today` phone-home event.
+- `VERCEL_PLUGIN_TELEMETRY=off`: disables all telemetry, including the default DAU-only session-start event.
-Where to set `VERCEL_PLUGIN_TELEMETRY=off`:
+Where to set `VERCEL_PLUGIN_TELEMETRY`:
- macOS / Linux: add it to the shell profile for the environment that launches your agent, such as `~/.zshrc`, `~/.bashrc`, `~/.bash_profile`, or `~/.config/fish/config.fish`, then restart that terminal or app session.
- Windows: set it in the PowerShell environment that launches your agent, add it to your PowerShell profile (`$PROFILE`), or set it as a persistent user environment variable.
@@ -130,12 +124,10 @@ Where to set `VERCEL_PLUGIN_TELEMETRY=off`:
Examples:
```bash
-echo 'disabled' > ~/.claude/vercel-plugin-telemetry-preference
export VERCEL_PLUGIN_TELEMETRY=off
```
```powershell
-$env:VERCEL_PLUGIN_TELEMETRY = "off"
setx VERCEL_PLUGIN_TELEMETRY off
```
diff --git a/docs/01-architecture-overview.md b/docs/01-architecture-overview.md
deleted file mode 100644
index 776f8c7..0000000
--- a/docs/01-architecture-overview.md
+++ /dev/null
@@ -1,427 +0,0 @@
-# Architecture Overview
-
-> **Audience**: Everyone — developers, skill authors, maintainers, and contributors.
-
-The Vercel Plugin for Claude Code is an **event-driven skill injection system** that automatically delivers relevant context to Claude based on what the developer is doing. When a developer opens a Next.js project, edits a configuration file, or types a prompt about deployments, the plugin detects the intent and injects precisely the right knowledge — without the developer asking for it.
-
----
-
-## Table of Contents
-
-1. [System Architecture Diagram](#system-architecture-diagram)
-2. [Core Concepts](#core-concepts)
-3. [Hook Lifecycle](#hook-lifecycle)
-4. [Complete Hook Inventory](#complete-hook-inventory)
-5. [Data Flow: From SKILL.md to Injection](#data-flow-from-skillmd-to-injection)
-6. [User Story: Developer Opens Claude Code in a Next.js Project](#user-story-developer-opens-claude-code-in-a-nextjs-project)
-7. [Glossary](#glossary)
-8. [Cross-References](#cross-references)
-
----
-
-## System Architecture Diagram
-
-```mermaid
-graph TB
- subgraph "Build Time"
- SKILL["skills/*/SKILL.md
(43 skills, YAML frontmatter + markdown)"]
- BUILD_MANIFEST["scripts/build-manifest.ts"]
- BUILD_FROM["scripts/build-from-skills.ts"]
- MANIFEST["generated/skill-manifest.json
(pre-compiled glob→regex)"]
- TEMPLATES["agents/*.md.tmpl
commands/*.md.tmpl"]
- GENERATED_MD["agents/*.md
commands/*.md"]
-
- SKILL -->|"extract frontmatter
compile patterns"| BUILD_MANIFEST
- BUILD_MANIFEST --> MANIFEST
- SKILL -->|"resolve {{include:skill:...}}"| BUILD_FROM
- TEMPLATES --> BUILD_FROM
- BUILD_FROM --> GENERATED_MD
- end
-
- subgraph "Runtime (Claude Code Session)"
- HOOKS_JSON["hooks/hooks.json
(hook registry)"]
-
- subgraph "SessionStart"
- PROFILER["session-start-profiler.mjs
scans project → LIKELY_SKILLS"]
- SEEN_INIT["session-start-seen-skills.mjs
initializes dedup state"]
- INJECT_MD["inject-claude-md.mjs
injects vercel.md ecosystem guide"]
- end
-
- subgraph "PreToolUse"
- SKILL_INJECT["pretooluse-skill-inject.mjs
pattern match → rank → inject"]
- SUBAGENT_OBSERVE["pretooluse-subagent-spawn-observe.mjs
captures pending Agent launches"]
- end
-
- subgraph "UserPromptSubmit"
- PROMPT_INJECT["user-prompt-submit-skill-inject.mjs
prompt signal scoring → inject"]
- end
-
- subgraph "SubagentStart / SubagentStop"
- SUBAGENT_BOOT["subagent-start-bootstrap.mjs
budget-aware context for subagents"]
- SUBAGENT_STOP["subagent-stop-sync.mjs
records lifecycle to ledger"]
- end
-
- subgraph "PostToolUse"
- VALIDATE["posttooluse-validate.mjs
skill validation rules"]
- SHADCN["posttooluse-shadcn-font-fix.mjs
shadcn font patch"]
- VERIFY_OBS["posttooluse-verification-observe.mjs
verification boundary detection"]
- end
-
- CLEANUP["session-end-cleanup.mjs
removes temp files"]
- end
-
- MANIFEST -->|"loaded at runtime"| SKILL_INJECT
- MANIFEST -->|"loaded at runtime"| PROMPT_INJECT
- MANIFEST -->|"loaded at runtime"| VALIDATE
- MANIFEST -->|"loaded at runtime"| SUBAGENT_BOOT
- HOOKS_JSON -->|"registers all hooks"| PROFILER
- HOOKS_JSON --> SKILL_INJECT
- HOOKS_JSON --> PROMPT_INJECT
- HOOKS_JSON --> CLEANUP
-
- SKILL_INJECT -->|"additionalContext"| CLAUDE["Claude Code Agent"]
- PROMPT_INJECT -->|"additionalContext"| CLAUDE
- SUBAGENT_BOOT -->|"additionalContext"| SUBAGENT["Spawned Subagent"]
-
- style SKILL fill:#f9f,stroke:#333
- style MANIFEST fill:#bbf,stroke:#333
- style CLAUDE fill:#bfb,stroke:#333
-```
-
----
-
-## Core Concepts
-
-The plugin is built around a simple pipeline:
-
-1. **Skills** define *what* knowledge exists (markdown content + matching rules)
-2. **The build** compiles skills into an optimized **manifest** (pre-compiled regex patterns)
-3. **Hooks** fire at lifecycle events, use the manifest to **match** and **rank** skills, then **inject** the right ones into Claude's context
-
-Every piece flows through this pipeline. Skills are the single source of truth — the manifest is derived, hooks consume it, and templates reference it.
-
----
-
-## Hook Lifecycle
-
-The plugin registers hooks for seven Claude Code lifecycle events. Here's how they execute in sequence during a typical session:
-
-```mermaid
-sequenceDiagram
- participant Dev as Developer
- participant CC as Claude Code
- participant SS as SessionStart Hooks
- participant PTU as PreToolUse Hooks
- participant UPS as UserPromptSubmit
- participant SA as Subagent Hooks
- participant POU as PostToolUse Hooks
- participant SE as SessionEnd
-
- Dev->>CC: Opens project
-
- rect rgb(230, 240, 255)
- Note over SS: Phase 1: Session Initialization
- CC->>SS: session-start-seen-skills
- SS-->>CC: SEEN_SKILLS="" initialized
- CC->>SS: session-start-profiler
- SS-->>CC: LIKELY_SKILLS="nextjs,ai-sdk,..."
- CC->>SS: inject-claude-md
- SS-->>CC: vercel.md ecosystem guide (52KB)
- end
-
- Dev->>CC: "Add a cron job to my API"
-
- rect rgb(255, 245, 230)
- Note over UPS: Phase 2: Prompt Analysis
- CC->>UPS: user-prompt-submit-skill-inject
- Note over UPS: Score: "cron" → vercel-cron (+6)
Budget: 8KB, max 2 skills
- UPS-->>CC: Inject vercel-cron skill
- end
-
- CC->>CC: Decides to read vercel.json
-
- rect rgb(230, 255, 230)
- Note over PTU: Phase 3: Tool-Time Injection
- CC->>PTU: pretooluse-skill-inject (Read vercel.json)
- Note over PTU: Path match: vercel.json → vercel-config
Budget: 18KB, max 3 skills
- PTU-->>CC: Inject vercel-config skill
- end
-
- CC->>CC: Writes app/api/cron/route.ts
-
- rect rgb(255, 230, 230)
- Note over POU: Phase 4: Post-Write Validation
- CC->>POU: posttooluse-validate (Write route.ts)
- Note over POU: Check validation rules
for matched skills
- POU-->>CC: Validation passed ✓
- end
-
- opt If Claude spawns a subagent
- rect rgb(245, 230, 255)
- Note over SA: Phase 5: Subagent Context
- CC->>PTU: pretooluse-subagent-spawn-observe (Agent tool)
- Note over PTU: Captures pending launch metadata
- CC->>SA: subagent-start-bootstrap
- Note over SA: Budget-aware injection
Explore=1KB, Plan=3KB, GP=8KB
- SA-->>CC: Skill context for subagent
- CC->>SA: subagent-stop-sync
- Note over SA: Records lifecycle to ledger
- end
- end
-
- Dev->>CC: Ends session
-
- rect rgb(240, 240, 240)
- Note over SE: Phase 6: Cleanup
- CC->>SE: session-end-cleanup
- Note over SE: Removes temp files:
dedup claims, profile cache,
pending launches, ledger
- end
-```
-
----
-
-## Complete Hook Inventory
-
-Every hook registered in `hooks/hooks.json`, organized by lifecycle event:
-
-### SessionStart
-
-Fires once when a Claude Code session starts (on `startup|resume|clear|compact`).
-
-| Hook | Source | Purpose |
-|------|--------|---------|
-| `session-start-seen-skills.mjs` | `hooks/src/session-start-seen-skills.mts` | Initializes `VERCEL_PLUGIN_SEEN_SKILLS=""` in the env file — the seed state for dedup tracking |
-| `session-start-profiler.mjs` | `hooks/src/session-start-profiler.mts` | Scans project for frameworks, dependencies, and config files. Sets `VERCEL_PLUGIN_LIKELY_SKILLS` (+5 priority boost). Detects greenfield projects. Caches profile for subagents |
-| `inject-claude-md.mjs` | `hooks/src/inject-claude-md.mts` | Injects the `vercel.md` ecosystem guide (~52KB) as additionalContext. Appends greenfield execution mode banner if project is empty |
-
-### PreToolUse
-
-Fires before each tool execution. Two matchers handle different tool types.
-
-| Hook | Matcher | Source | Purpose |
-|------|---------|--------|---------|
-| `pretooluse-skill-inject.mjs` | `Read\|Edit\|Write\|Bash` | `hooks/src/pretooluse-skill-inject.mts` | **Main injection engine.** Matches file paths (glob), bash commands (regex), and imports (regex) against skill patterns. Applies vercel.json routing (±10), profiler boost (+5), ranks by priority, deduplicates, and injects up to **3 skills within 18KB budget** |
-| `pretooluse-subagent-spawn-observe.mjs` | `Agent` | `hooks/src/pretooluse-subagent-spawn-observe.mts` | **Observer.** Captures pending subagent spawn metadata (description, prompt, type) to a JSONL file. Later consumed by `subagent-start-bootstrap` to correlate skills with the subagent's task |
-
-**Special triggers in pretooluse-skill-inject:**
-- **TSX review**: After N `.tsx` edits (default 3, configurable via `VERCEL_PLUGIN_REVIEW_THRESHOLD`), injects `react-best-practices`
-- **Dev server detection**: Boosts `agent-browser-verify` when dev server patterns appear in bash commands
-- **Vercel env help**: One-time injection for `vercel env` commands
-
-### UserPromptSubmit
-
-Fires when the user submits a prompt (matches all prompts — empty matcher string).
-
-| Hook | Source | Purpose |
-|------|--------|---------|
-| `user-prompt-submit-skill-inject.mjs` | `hooks/src/user-prompt-submit-skill-inject.mts` | **Prompt signal scoring engine.** Normalizes prompt text (lowercases, expands contractions), scores against skill `promptSignals` frontmatter (phrases +6, allOf +4, anyOf +1 capped at +2, noneOf suppresses). Classifies troubleshooting intent. Injects up to **2 skills within 8KB budget** |
-
-### SubagentStart
-
-Fires when a subagent is spawned (matches any agent type via `.+`).
-
-| Hook | Source | Purpose |
-|------|--------|---------|
-| `subagent-start-bootstrap.mjs` | `hooks/src/subagent-start-bootstrap.mts` | **Budget-aware subagent context injection.** Scales content by agent type: `Explore` gets ~1KB (skill names + profile summary), `Plan` gets ~3KB (summaries + deployment constraints), `general-purpose` gets ~8KB (full skill bodies with summary fallback). Reads profiler cache and pending launch metadata. Marks injected skills in agent-scoped dedup claims |
-
-### SubagentStop
-
-Fires when a subagent completes (matches any agent type via `.+`).
-
-| Hook | Source | Purpose |
-|------|--------|---------|
-| `subagent-stop-sync.mjs` | `hooks/src/subagent-stop-sync.mts` | **Observer.** Records subagent lifecycle metadata (agent ID, type, skill count, timestamp) to a JSONL ledger at `/vercel-plugin--subagent-ledger.jsonl` |
-
-### PostToolUse
-
-Fires after tool execution. Two matchers handle different scenarios.
-
-| Hook | Matcher | Source | Purpose |
-|------|---------|--------|---------|
-| `posttooluse-shadcn-font-fix.mjs` | `Bash` | standalone (no .mts source) | Fixes shadcn font loading issues by patching font import statements |
-| `posttooluse-verification-observe.mjs` | `Bash` | `hooks/src/posttooluse-verification-observe.mts` | **Observer.** Classifies bash commands into verification boundaries: `uiRender` (browser/screenshot), `clientRequest` (curl/fetch), `serverHandler` (log tailing), `environment` (env var reads). Infers routes from recent file edits or command URLs. Emits structured log events |
-| `posttooluse-validate.mjs` | `Write\|Edit` | `hooks/src/posttooluse-validate.mts` | **Validation engine.** Matches written/edited files to skills, runs regex-based validation rules from skill frontmatter. Reports errors (mandatory fix) and warnings (suggestions) with line numbers |
-
-### SessionEnd
-
-Fires when the session ends (no matcher — always fires).
-
-| Hook | Source | Purpose |
-|------|--------|---------|
-| `session-end-cleanup.mjs` | `hooks/src/session-end-cleanup.mts` | **Best-effort cleanup.** Removes all session-scoped temp files: dedup claims, dedup session file, profile cache, pending launches JSONL, subagent ledger. Silently ignores failures |
-
-### Shared Library Modules
-
-These are not hooks themselves but are imported by entry-point hooks:
-
-| Module | Purpose |
-|--------|---------|
-| `hook-env.mts` | Shared runtime helpers: env file parsing, plugin root resolution, dedup claim operations (atomic O_EXCL), audit logging, profile cache paths |
-| `patterns.mts` | Glob→regex conversion, path/bash/import matching with match reasons, ranking engine, dedup state merging |
-| `prompt-patterns.mts` | Prompt text normalization (contraction expansion), signal compilation, scoring, lexical fallback, troubleshooting intent classification |
-| `skill-map-frontmatter.mts` | Inline YAML parser (no js-yaml), frontmatter extraction, `buildSkillMap()`, `validateSkillMap()` with structured warnings |
-| `logger.mts` | Structured JSON logging to stderr (off/summary/debug/trace levels), per-invocation tracing, timing metrics |
-| `vercel-config.mts` | Reads `vercel.json` keys → maps to skill routing adjustments (±10 priority) |
-| `prompt-analysis.mts` | Dry-run prompt analysis reports (for debugging prompt matching) |
-| `lexical-index.mts` | MiniSearch-based lexical fallback index for fuzzy skill matching |
-| `subagent-state.mts` | File-locked JSONL operations for pending launches and agent-scoped dedup claims |
-
----
-
-## Data Flow: From SKILL.md to Injection
-
-Here's how a skill goes from source markdown to injected context:
-
-```
-┌─────────────────────────────────────────────────────────────────┐
-│ 1. AUTHOR │
-│ │
-│ skills/vercel-cron/SKILL.md │
-│ ┌──────────────────────────────┐ │
-│ │ --- │ │
-│ │ name: vercel-cron │ ← YAML frontmatter defines │
-│ │ metadata: │ matching rules + priority │
-│ │ priority: 6 │ │
-│ │ pathPatterns: │ │
-│ │ - "vercel.json" │ │
-│ │ promptSignals: │ │
-│ │ phrases: ["cron job"] │ │
-│ │ --- │ │
-│ │ # How to configure crons... │ ← Markdown body = injected │
-│ └──────────────────────────────┘ context │
-│ │
-├─────────────────────────────────────────────────────────────────┤
-│ 2. BUILD (bun run build) │
-│ │
-│ build-manifest.ts reads all 43 SKILL.md files │
-│ ↓ │
-│ Parses YAML frontmatter (inline parser, not js-yaml) │
-│ ↓ │
-│ Compiles globs → regex at build time for runtime speed │
-│ ↓ │
-│ generated/skill-manifest.json (paired arrays format v2) │
-│ │
-├─────────────────────────────────────────────────────────────────┤
-│ 3. RUNTIME (Claude Code session) │
-│ │
-│ Hook loads manifest → compiles patterns → matches input │
-│ ↓ │
-│ Ranking: base priority (6) │
-│ + vercel.json routing (±10 if key matches) │
-│ + profiler boost (+5 if in LIKELY_SKILLS) │
-│ ↓ │
-│ Dedup: skip if skill already claimed in session │
-│ ↓ │
-│ Budget: fit skills into byte limit (18KB PreToolUse, 8KB UPS) │
-│ ↓ │
-│ Inject as additionalContext → Claude reads it before acting │
-└─────────────────────────────────────────────────────────────────┘
-```
-
----
-
-## User Story: Developer Opens Claude Code in a Next.js Project
-
-> **Scenario**: A developer opens Claude Code in a Next.js project that uses the AI SDK and has a `vercel.json` with cron configuration. They ask: "Add a new cron job that sends a weekly digest email."
-
-### Phase 1: Session Initialization
-
-When the session starts, three hooks fire in sequence:
-
-1. **`session-start-seen-skills`** initializes dedup tracking:
- ```
- VERCEL_PLUGIN_SEEN_SKILLS=""
- ```
-
-2. **`session-start-profiler`** scans the project root:
- - Finds `next.config.js` → hints `nextjs`
- - Reads `package.json`, finds `ai` dependency → hints `ai-sdk`
- - Finds `vercel.json` with `crons` key → hints `vercel-cron`
- - Checks `vercel --version` → up to date
- - Checks `agent-browser` availability → found on PATH
- - **Result**: `VERCEL_PLUGIN_LIKELY_SKILLS="nextjs,ai-sdk,vercel-cron"`
- - Caches profile to `/vercel-plugin--profile.json`
-
-3. **`inject-claude-md`** loads `vercel.md` (~52KB ecosystem guide) and outputs it as additionalContext. Claude now has broad Vercel platform knowledge.
-
-### Phase 2: Prompt Analysis
-
-The developer types: *"Add a new cron job that sends a weekly digest email"*
-
-**`user-prompt-submit-skill-inject`** fires:
-- Normalizes prompt: `"add a new cron job that sends a weekly digest email"`
-- Scores against all skills with `promptSignals`:
- - `vercel-cron`: phrase `"cron job"` matches → +6 → score 6 ≥ minScore 6 ✓
-- Dedup check: `vercel-cron` not in SEEN_SKILLS → proceed
-- Budget check: skill body fits within 8KB → inject
-- Claims `vercel-cron` in dedup state
-- **Result**: Claude receives the full `vercel-cron` skill content as additionalContext
-
-### Phase 3: Tool-Time Injection
-
-Claude decides to read `vercel.json` to understand existing cron configuration.
-
-**`pretooluse-skill-inject`** fires (tool: Read, path: `vercel.json`):
-- Path match: `vercel.json` → matches `vercel-config` skill's pathPattern
-- Also matches `vercel-cron` → but already claimed in dedup → skip
-- Profiler boost: `vercel-config` not in LIKELY_SKILLS → no boost
-- Ranking: `vercel-config` at base priority
-- Budget: fits within 18KB → inject
-- **Result**: Claude receives `vercel-config` skill content
-
-### Phase 4: Writing Code
-
-Claude creates `app/api/cron/weekly-digest/route.ts`.
-
-**`posttooluse-validate`** fires (tool: Write):
-- Matches file path against skill validation rules
-- Checks `vercel-cron` validation rules (e.g., route handler patterns)
-- All rules pass → no violations reported
-- **Result**: Write proceeds without intervention
-
-### Phase 5: Session End
-
-Developer closes the session.
-
-**`session-end-cleanup`** fires:
-- Deletes `/vercel-plugin--seen-skills.txt`
-- Deletes `/vercel-plugin--seen-skills.d/` (claim dir)
-- Deletes `/vercel-plugin--profile.json`
-- All temp state is gone — next session starts fresh
-
-### What the Developer Experienced
-
-The developer never asked for help with Vercel cron configuration. They just described what they wanted. The plugin:
-- Detected their Next.js + Vercel stack at session start
-- Recognized "cron job" in their prompt and injected cron docs
-- Injected config knowledge when Claude read `vercel.json`
-- Validated the output after writing
-
-All of this happened transparently. The developer got expert-level Vercel guidance without knowing the plugin was there.
-
----
-
-## Glossary
-
-| Term | Definition |
-|------|------------|
-| **Skill** | A unit of injectable knowledge. Lives in `skills//SKILL.md` with YAML frontmatter (matching rules, priority, validation) and a markdown body (the content injected into Claude's context) |
-| **Hook** | A Node.js script registered in `hooks/hooks.json` that runs at a specific Claude Code lifecycle event. Hooks receive JSON on stdin and may output JSON on stdout to modify Claude's behavior (e.g., inject additionalContext) |
-| **Manifest** | `generated/skill-manifest.json` — a pre-compiled index of all skill frontmatter with glob patterns converted to regex at build time. Hooks load this at runtime instead of scanning SKILL.md files directly |
-| **Dedup** | The deduplication system that prevents the same skill from being injected twice in a session. Uses three layers: atomic file claims (O_EXCL), a session file (comma-delimited), and an env var (`VERCEL_PLUGIN_SEEN_SKILLS`). All three are merged via `mergeSeenSkillStates()` |
-| **Budget** | Byte limits that cap how much skill content can be injected per hook invocation. PreToolUse: 18KB max, 3 skills. UserPromptSubmit: 8KB max, 2 skills. SubagentStart: varies by agent type (1KB–8KB). Prevents context window bloat |
-| **Profiler** | The `session-start-profiler` hook that scans the project at session start — checking config files, package.json dependencies, and vercel.json keys — to pre-identify likely relevant skills. Profiled skills receive a +5 priority boost |
-| **Claim Dir** | `/vercel-plugin--seen-skills.d/` — a directory of empty files, one per claimed skill, created atomically with `O_EXCL` flag to prevent race conditions. The authoritative source of dedup truth. Agent-scoped variants exist for subagent isolation |
-| **Priority** | A numeric score (typically 4–8) that determines injection order. Base priority is set in SKILL.md frontmatter. Modified at runtime by vercel.json routing (±10), profiler boost (+5), and prompt signal scores. Higher priority = injected first |
-| **additionalContext** | The mechanism hooks use to inject content into Claude's context. Returned as part of the hook's JSON output. Claude Code prepends this to the tool result or prompt, so the agent sees it before acting |
-| **Greenfield** | A project with no source files (only dot-directories). The profiler detects this and sets `VERCEL_PLUGIN_GREENFIELD=true`, which triggers a special execution mode: skip planning, use sensible defaults, bootstrap immediately |
-| **Observer Hook** | A hook that records telemetry but does not modify behavior. Returns empty JSON `{}`. Examples: `pretooluse-subagent-spawn-observe`, `posttooluse-verification-observe`, `subagent-stop-sync` |
-
----
-
-## Cross-References
-
-- **Section 2**: [Injection Pipeline Deep-Dive](./02-injection-pipeline.md) — detailed walkthrough of pattern matching, ranking, budget enforcement, and prompt signal scoring
-- **Section 3**: [Skill Authoring Guide](./03-skill-authoring.md) — how to create, test, and validate a new skill
-- **Section 4**: [Operations & Debugging](./04-operations-debugging.md) — environment variables, log levels, `doctor`/`explain` CLI, dedup troubleshooting
-- **Section 5**: [Reference](./05-reference.md) — complete hook registry, env var table, YAML parser edge cases, skill catalog
diff --git a/docs/02-injection-pipeline.md b/docs/02-injection-pipeline.md
deleted file mode 100644
index 7ecc1f1..0000000
--- a/docs/02-injection-pipeline.md
+++ /dev/null
@@ -1,768 +0,0 @@
-# 2. Injection Pipeline Deep-Dive
-
-This document explains how vercel-plugin decides **which skills to inject**, **when**, and **why**. It covers both the PreToolUse hook (file/bash/import pattern matching) and the UserPromptSubmit hook (prompt signal scoring), including the ranking pipeline, dedup state machine, budget enforcement, and special-case triggers.
-
----
-
-## Table of Contents
-
-1. [Overview](#overview)
-2. [PreToolUse Pipeline](#pretooluse-pipeline)
- - [Stage 1: parseInput](#stage-1-parseinput)
- - [Stage 2: loadSkills](#stage-2-loadskills)
- - [Stage 3: matchSkills](#stage-3-matchskills)
- - [Stage 4: deduplicateSkills](#stage-4-deduplicateskills)
- - [Stage 5: injectSkills](#stage-5-injectskills)
- - [Stage 6: formatOutput](#stage-6-formatoutput)
-3. [UserPromptSubmit Pipeline](#userpromptsubmit-pipeline)
-4. [Pattern Matching In Depth](#pattern-matching-in-depth)
- - [Glob-to-Regex Compilation](#glob-to-regex-compilation)
- - [Path Matching Strategy](#path-matching-strategy)
- - [Bash Pattern Matching](#bash-pattern-matching)
- - [Import Pattern Matching](#import-pattern-matching)
-5. [Prompt Signal Scoring](#prompt-signal-scoring)
- - [Scoring Weights](#scoring-weights)
- - [Normalization and Contractions](#normalization-and-contractions)
- - [Lexical Fallback Scoring](#lexical-fallback-scoring)
- - [Troubleshooting Intent Classification](#troubleshooting-intent-classification)
-6. [Ranking Pipeline](#ranking-pipeline)
- - [Base Priority](#base-priority)
- - [Vercel.json Key-Aware Routing](#verceljson-key-aware-routing)
- - [Profiler Boost](#profiler-boost)
- - [Setup Mode Routing](#setup-mode-routing)
- - [Unified Ranker](#unified-ranker)
-7. [Dedup State Machine](#dedup-state-machine)
- - [Three State Sources](#three-state-sources)
- - [Merge Strategy](#merge-strategy)
- - [Fallback Strategies](#fallback-strategies)
- - [Scope-Aware Dedup for Subagents](#scope-aware-dedup-for-subagents)
-8. [Budget Enforcement](#budget-enforcement)
- - [PreToolUse Budget](#pretooluse-budget)
- - [UserPromptSubmit Budget](#userpromptsubmit-budget)
- - [Summary Fallback](#summary-fallback)
-9. [Special-Case Triggers](#special-case-triggers)
- - [TSX Review Trigger](#tsx-review-trigger)
- - [Dev Server Detection](#dev-server-detection)
- - [Vercel Env Help](#vercel-env-help)
- - [Investigation Companion Selection](#investigation-companion-selection)
-10. [User Story: Why Didn't My Skill Inject?](#user-story-why-didnt-my-skill-inject)
-
----
-
-## Overview
-
-The injection pipeline is the core mechanism that makes vercel-plugin context-aware. Instead of dumping all 43 skills into every Claude session, the plugin watches what Claude is doing and injects only the skills that are relevant to the current action.
-
-There are two independent injection paths:
-
-| Hook | Trigger | Budget | Max Skills | Match Method |
-|------|---------|--------|------------|--------------|
-| **PreToolUse** | Claude calls Read, Edit, Write, or Bash | 18 KB | 5 | File path globs, bash command regex, import patterns |
-| **UserPromptSubmit** | User types a prompt | 8 KB | 2 | Prompt signal scoring (phrases, allOf, anyOf, noneOf) |
-
-Both hooks share the same dedup system to prevent re-injecting skills that were already delivered in the current session.
-
-```mermaid
-flowchart LR
- subgraph "Claude Code Session"
- A[User types prompt] --> B{UserPromptSubmit}
- C[Claude calls tool] --> D{PreToolUse}
- end
-
- B --> E[Prompt Signal Scoring]
- D --> F[Pattern Matching]
-
- E --> G[Rank + Dedup + Budget]
- F --> G
-
- G --> H[additionalContext injection]
- H --> I[Claude sees skill content]
-```
-
----
-
-## PreToolUse Pipeline
-
-The PreToolUse hook fires every time Claude calls a Read, Edit, Write, or Bash tool. It runs a six-stage pipeline, each stage independently importable and testable.
-
-```mermaid
-sequenceDiagram
- participant CC as Claude Code
- participant Hook as PreToolUse Hook
- participant SM as Skill Map
- participant FS as File System
- participant Dedup as Dedup Engine
-
- CC->>Hook: stdin JSON (tool_name, tool_input, session_id)
-
- Note over Hook: Stage 1: parseInput
- Hook->>Hook: Extract toolName, toolInput, toolTarget, scopeId
-
- Note over Hook: Stage 2: loadSkills
- Hook->>SM: Try manifest (generated/skill-manifest.json)
- alt Manifest exists (v2)
- SM-->>Hook: Pre-compiled regex patterns
- else No manifest
- Hook->>FS: Scan skills/*/SKILL.md
- FS-->>Hook: Build + validate skill map
- Hook->>Hook: Compile glob→regex at runtime
- end
-
- Note over Hook: Stage 3: matchSkills
- alt Read/Edit/Write tool
- Hook->>Hook: Match file_path against pathPatterns
- Hook->>Hook: Match file content against importPatterns
- else Bash tool
- Hook->>Hook: Match command against bashPatterns
- end
-
- Note over Hook: Stage 4: deduplicateSkills
- Hook->>Dedup: Read seen-skills (env + file + claims)
- Dedup-->>Hook: Merged seen set
- Hook->>Hook: Filter already-seen
- Hook->>Hook: Apply vercel.json routing (±10)
- Hook->>Hook: Apply profiler boost (+5)
- Hook->>Hook: Apply setup-mode routing (+50)
- Hook->>Hook: rankEntries() → sort by effectivePriority DESC
-
- Note over Hook: Stage 5: injectSkills
- Hook->>FS: Read SKILL.md body for each ranked skill
- Hook->>Hook: Budget check (18KB / 5 skills)
- Hook->>Hook: Summary fallback if over budget
- Hook->>Dedup: Claim injected skills (atomic file lock)
-
- Note over Hook: Stage 6: formatOutput
- Hook->>CC: stdout JSON with additionalContext
-```
-
-### Stage 1: parseInput
-
-**Source**: `pretooluse-skill-inject.mts:parseInput()`
-
-Reads JSON from stdin and extracts:
-- `toolName` — one of `Read`, `Edit`, `Write`, `Bash`
-- `toolInput` — the tool's arguments (e.g., `file_path`, `command`)
-- `sessionId` — used for file-based dedup
-- `toolTarget` — the primary target (file path for file tools, command string for Bash)
-- `scopeId` — agent ID for subagent-scoped dedup (undefined for the main agent)
-
-Unsupported tools (anything not in `["Read", "Edit", "Write", "Bash"]`) are rejected immediately with an empty `{}` response.
-
-### Stage 2: loadSkills
-
-**Source**: `pretooluse-skill-inject.mts:loadSkills()`
-
-Loads the skill catalog with a two-tier strategy:
-
-1. **Try the manifest** (`generated/skill-manifest.json`) — a pre-built JSON file containing all skill metadata with pre-compiled regex sources. Version 2 manifests include paired arrays (`pathPatterns` ↔ `pathRegexSources`) so the hook can reconstruct `RegExp` objects directly without re-running glob-to-regex compilation.
-
-2. **Fall back to live scan** — if no manifest exists, scans `skills/*/SKILL.md`, parses YAML frontmatter via `buildSkillMap()`, validates with `validateSkillMap()`, and compiles patterns at runtime.
-
-The manifest path is always preferred because it's faster (no filesystem scan, no YAML parsing, no glob compilation).
-
-### Stage 3: matchSkills
-
-**Source**: `pretooluse-skill-inject.mts:matchSkills()`
-
-For **file tools** (Read/Edit/Write):
-1. Match `file_path` against each skill's compiled `pathPatterns`
-2. If no path match, attempt **import matching** — scan file content (`content`, `old_string`, `new_string`) against `importPatterns`
-
-For **Bash**:
-1. Match `command` against each skill's compiled `bashPatterns`
-
-Each match produces a `MatchReason` with the winning pattern and match type (`full`, `basename`, `suffix`, `import`).
-
-### Stage 4: deduplicateSkills
-
-**Source**: `pretooluse-skill-inject.mts:deduplicateSkills()`
-
-This is where priority adjustments and filtering happen:
-
-1. **Filter already-seen** — remove skills present in the merged dedup state
-2. **Vercel.json routing** — if the target is `vercel.json`, read its keys and adjust priorities (see [Ranking Pipeline](#ranking-pipeline))
-3. **Profiler boost** — skills in `VERCEL_PLUGIN_LIKELY_SKILLS` get +5 priority
-4. **Setup-mode routing** — in greenfield projects, `bootstrap` gets a +50 priority boost
-5. **Rank** — sort by `effectivePriority` DESC, then skill name ASC
-6. **Cap** — take the top N skills (default 5)
-
-### Stage 5: injectSkills
-
-**Source**: `pretooluse-skill-inject.mts:injectSkills()`
-
-For each ranked skill (in priority order):
-1. Check the hard ceiling (max 5 skills)
-2. Read `skills/<skill>/SKILL.md` from disk
-3. Strip YAML frontmatter, keep only the body
-4. Wrap in HTML comment markers: `<!-- vercel-plugin:skill:<name> --> ... <!-- /vercel-plugin:skill:<name> -->` (exact marker text defined in the hook source)
-5. Check byte budget — the first skill always gets full body; subsequent skills must fit within remaining budget
-6. If over budget, try summary fallback (see [Budget Enforcement](#budget-enforcement))
-7. Atomically claim the skill in the dedup system
-
-### Stage 6: formatOutput
-
-Assembles the final JSON output:
-
-```json
-{
- "hookSpecificOutput": {
- "additionalContext": "\n...skill body...\n"
- }
-}
-```
-
-Also embeds a metadata comment for debugging:
-```html
-<!-- vercel-plugin:meta matched=[...] injected=[...] dropped=[...] (debug metadata; exact format defined in the hook source) -->
-```
-
----
-
-## UserPromptSubmit Pipeline
-
-The UserPromptSubmit hook fires when the user types a prompt, before any tool calls. It uses a different matching strategy — **prompt signal scoring** instead of pattern matching.
-
-**Source**: `user-prompt-submit-skill-inject.mts`
-
-Pipeline stages:
-1. **parsePromptInput** — extract prompt text, session ID, cwd; reject prompts shorter than 10 characters
-2. **normalizePromptText** — lowercase, expand contractions, stem tokens, collapse whitespace
-3. **loadSkills** — reuses the same `loadSkills()` from PreToolUse
-4. **analyzePrompt** — score every skill's `promptSignals` against the normalized prompt (see [Prompt Signal Scoring](#prompt-signal-scoring))
-5. **Troubleshooting intent routing** — classify prompt into flow-verification, stuck-investigation, or browser-only buckets
-6. **Investigation companion selection** — when `investigation-mode` is selected, pick the best companion skill
-7. **Dedup + inject** — filter seen skills, load SKILL.md bodies, enforce 8KB budget / 2 skill cap
-8. **formatOutput** — build banner explaining why skills were auto-suggested
-
-Key differences from PreToolUse:
-- **Budget**: 8 KB (vs 18 KB)
-- **Max skills**: 2 (vs 5)
-- **Match method**: prompt signal scoring (not file/bash patterns)
-- **Minimum prompt length**: 10 characters
-
----
-
-## Pattern Matching In Depth
-
-### Glob-to-Regex Compilation
-
-**Source**: `patterns.mts:globToRegex()`
-
-Skill frontmatter uses glob patterns for `pathPatterns`. At build time (or runtime if no manifest), these are compiled to JavaScript `RegExp` objects.
-
-Supported wildcards:
-
-| Glob | Regex | Meaning |
-|------|-------|---------|
-| `*` | `[^/]*` | Match any characters except `/` |
-| `**` | `.*` | Match anything (including `/`) |
-| `**/` | `(?:[^/]+/)*` | Match zero or more path segments |
-| `?` | `[^/]` | Match exactly one character (not `/`) |
-| `{a,b}` | `(?:a\|b)` | Brace expansion (alternation) |
-
-**Examples**:
-
-```
-Glob: **/*.tsx
-Regex: ^(?:[^/]+/)*[^/]*\.tsx$
-Match: src/components/Button.tsx ✓
- Button.tsx ✓
- src/styles/global.css ✗
-
-Glob: app/**/page.{ts,tsx}
-Regex: ^app/(?:[^/]+/)*page\.(?:ts|tsx)$
-Match: app/page.tsx ✓
- app/dashboard/settings/page.ts ✓
- lib/page.tsx ✗
-
-Glob: vercel.json
-Regex: ^vercel\.json$
-Match: vercel.json ✓
- app/vercel.json ✗ (but see suffix matching below)
-```
-
-The manifest (v2) stores the compiled regex source alongside the original glob, so the hook only needs `new RegExp(source)` at runtime — no glob compilation needed.
-
-### Path Matching Strategy
-
-**Source**: `patterns.mts:matchPathWithReason()`
-
-Path matching attempts three strategies in order:
-
-1. **Full path match** — test the entire normalized path against the regex
-2. **Basename match** — test just the filename (`Button.tsx`)
-3. **Suffix match** — progressively test longer suffixes (`components/Button.tsx`, `src/components/Button.tsx`, etc.)
-
-This multi-strategy approach means `vercel.json` in a glob will match both `/project/vercel.json` and `/project/apps/web/vercel.json`.
-
-### Bash Pattern Matching
-
-**Source**: `patterns.mts:matchBashWithReason()`
-
-Bash patterns are regular expressions tested against the full command string. No special normalization is applied — the regex is tested directly.
-
-```yaml
-# In SKILL.md frontmatter:
-metadata:
- bashPatterns: ["\\bnext\\s+dev\\b", "\\bnpm\\s+run\\s+build\\b"]
-```
-
-### Import Pattern Matching
-
-**Source**: `patterns.mts:importPatternToRegex()`
-
-Import patterns match against file content (not file paths). They detect `import`, `require`, and dynamic `import()` statements.
-
-```yaml
-# In SKILL.md frontmatter:
-metadata:
- importPatterns: ["@vercel/analytics", "ai"]
-```
-
-The pattern `ai` generates a regex that matches:
-```
-from 'ai'
-from 'ai/react'
-require('ai')
-import('ai/rsc')
-```
-
-Import matching is a **fallback** — it only runs when path matching produces no hit for a given skill.
-
----
-
-## Prompt Signal Scoring
-
-**Source**: `prompt-patterns.mts`
-
-Each skill can define `promptSignals` in its frontmatter to declare what user prompts should trigger injection.
-
-### Scoring Weights
-
-```mermaid
-flowchart TB
- subgraph "Scoring Engine"
- P[Prompt text] --> N[Normalize: lowercase + expand contractions + stem]
- N --> NONE{noneOf check}
- NONE -->|term found| SUPPRESS[score = -Infinity, REJECT]
- NONE -->|no match| PHRASES
-
- PHRASES[phrases check] -->|+6 per hit| SUM
- ALLOF[allOf check] -->|+4 per group| SUM
- ANYOF[anyOf check] -->|+1 per hit, max +2| SUM
-
- SUM[Total Score] --> THR{score >= minScore?}
- THR -->|Yes| ACCEPT[MATCHED]
- THR -->|No| REJECT2[NOT MATCHED]
- end
-```
-
-| Signal Type | Weight | Behavior |
-|-------------|--------|----------|
-| `phrases` | **+6** per hit | Exact substring match (case-insensitive, after normalization) |
-| `allOf` | **+4** per group | All terms in the group must appear in the prompt |
-| `anyOf` | **+1** per hit, **capped at +2** | Any term matches; cap prevents low-signal flooding |
-| `noneOf` | **-Infinity** | Hard suppress — if any noneOf term matches, the skill is excluded |
-| `minScore` | threshold (default **6**) | Score must meet or exceed this to qualify |
-
-**Example**: A skill with `phrases: ["deploy to vercel"]` and `minScore: 6`:
-- "How do I **deploy to vercel**?" → score 6 (one phrase hit) → matched
-- "How do I deploy?" → score 0 (no phrase hit) → not matched
-
-**Example**: Reaching threshold via allOf + anyOf only:
-```yaml
-promptSignals:
- allOf: [["cron", "schedule"]] # +4
- anyOf: ["vercel", "deploy", "production"] # +1 each, capped at +2
- minScore: 6
-```
-- "I need to schedule a cron job on vercel for production" → allOf +4, anyOf +2 = score 6 → matched
-
-### Normalization and Contractions
-
-Before scoring, both the prompt text and the signal terms undergo normalization:
-
-1. **Lowercase** — `"Deploy to Vercel"` → `"deploy to vercel"`
-2. **Contraction expansion** — `"it's"` → `"it is"`, `"don't"` → `"do not"`, `"can't"` → `"cannot"`
-3. **Stemming** — `"deploying"` → `"deploy"`, `"configured"` → `"configur"`
-4. **Whitespace collapse** — multiple spaces/newlines → single space
-
-This means skill authors don't need to account for contractions or verb tenses in their signal definitions.
-
-### Lexical Fallback Scoring
-
-**Source**: `prompt-patterns.mts:scorePromptWithLexical()`
-
-When exact prompt signal scoring doesn't reach the threshold, a **lexical index** (TF-IDF based) provides a fallback. The lexical score is boosted by 1.35x and compared against the exact score. The higher score wins.
-
-This ensures skills with strong keyword overlap still get matched even if the user's phrasing doesn't exactly hit the configured phrases.
-
-### Troubleshooting Intent Classification
-
-**Source**: `prompt-patterns.mts:classifyTroubleshootingIntent()`
-
-A regex-based classifier detects three troubleshooting intents in user prompts:
-
-| Intent | Pattern Examples | Routed Skills |
-|--------|-----------------|---------------|
-| `flow-verification` | "loads but", "submits but", "works locally but" | `verification` |
-| `stuck-investigation` | "stuck", "frozen", "timed out", "not responding" | `investigation-mode` |
-| `browser-only` | "blank page", "white screen", "console errors" | `agent-browser-verify`, `investigation-mode` |
-
-**Suppression**: Test framework mentions (`jest`, `vitest`, `playwright test`, etc.) suppress all verification-family skills to avoid injecting browser verification guidance during unit testing.
-
----
-
-## Ranking Pipeline
-
-Every matched skill goes through a ranking pipeline that determines injection order. The pipeline applies priority adjustments in layers:
-
-```mermaid
-flowchart TB
- BASE["Base Priority
(from SKILL.md frontmatter)
Range: 4–8, default 5"]
- VJ["Vercel.json Routing
Relevant key: +10
Irrelevant key: -10"]
- PROF["Profiler Boost
Likely skill: +5"]
- SETUP["Setup Mode
bootstrap: +50"]
- RANK["rankEntries()
Sort: effectivePriority DESC
Tiebreak: skill name ASC"]
- CAP["Cap at MAX_SKILLS
(5 for PreToolUse)"]
-
- BASE --> VJ --> PROF --> SETUP --> RANK --> CAP
-
- style BASE fill:#e1f5fe
- style VJ fill:#fff3e0
- style PROF fill:#e8f5e9
- style SETUP fill:#fce4ec
- style RANK fill:#f3e5f5
- style CAP fill:#fff9c4
-```
-
-### Base Priority
-
-Set in each skill's SKILL.md frontmatter:
-
-```yaml
-metadata:
- priority: 6 # Range 4–8, default 5
-```
-
-Higher priority means earlier injection. Skills with equal priority are sorted alphabetically.
-
-### Vercel.json Key-Aware Routing
-
-**Source**: `vercel-config.mts`
-
-When the tool target is a `vercel.json` file, the hook reads the file's keys and adjusts priorities for skills that claim `vercel.json` in their `pathPatterns`:
-
-| vercel.json Key | Relevant Skill |
-|----------------|----------------|
-| `redirects`, `rewrites`, `headers`, `cleanUrls`, `trailingSlash` | `routing-middleware` |
-| `crons` | `cron-jobs` |
-| `functions`, `regions` | `vercel-functions` |
-| `builds`, `buildCommand`, `installCommand`, `outputDirectory`, `framework`, `devCommand`, `ignoreCommand` | `deployments-cicd` |
-
-**Priority adjustment**:
-- Skill **is** relevant to the file's keys → **+10**
-- Skill **is not** relevant (but claims vercel.json) → **-10**
-
-This prevents irrelevant skills from being injected when editing vercel.json. For example, editing a vercel.json with `{ "crons": [...] }` will boost `cron-jobs` by +10 and demote `routing-middleware` by -10.
-
-### Profiler Boost
-
-The session-start profiler scans the project's dependencies, config files, and directory structure to predict which skills are likely relevant. These "likely skills" are stored in `VERCEL_PLUGIN_LIKELY_SKILLS`.
-
-**Boost**: +5 to `effectivePriority` for any matched skill that's also in the likely-skills set.
-
-The boost stacks on top of vercel.json routing:
-```
-effectivePriority = base + vercelJsonAdjustment + profilerBoost
-```
-
-### Setup Mode Routing
-
-When the project is greenfield (`VERCEL_PLUGIN_GREENFIELD=true`), the `bootstrap` skill gets a massive priority boost of **+50**, ensuring it's always injected first. If `bootstrap` didn't naturally match the tool call, it's synthetically added to the match set.
-
-### Unified Ranker
-
-**Source**: `patterns.mts:rankEntries()`
-
-After all priority adjustments, skills are sorted:
-1. **Primary**: `effectivePriority` DESC (or base `priority` if no adjustments)
-2. **Secondary**: skill name ASC (alphabetical tiebreak)
-
----
-
-## Dedup State Machine
-
-The dedup system prevents the same skill from being injected twice in a session. It uses three independent state sources that are merged into a unified view.
-
-```mermaid
-stateDiagram-v2
- [*] --> Initialized: SessionStart hook sets
-VERCEL_PLUGIN_SEEN_SKILLS=""
-
- state "Three State Sources" as sources {
- EnvVar: Env Var
-VERCEL_PLUGIN_SEEN_SKILLS
-(comma-delimited)
- ClaimDir: Claim Directory
-tmp/vercel-plugin-{sessionId}-seen-skills.d/
-(one file per skill, atomic O_EXCL)
- SessionFile: Session File
-tmp/vercel-plugin-{sessionId}-seen-skills.txt
-(comma-delimited snapshot)
- }
-
- Initialized --> sources: Hook invocation
-
- sources --> Merge: mergeSeenSkillStates()
- Merge --> Check: Is skill in merged set?
-
- Check --> Skip: Yes → already injected
- Check --> Inject: No → new skill
-
- Inject --> Claim: tryClaimSessionKey()
-(atomic openSync O_EXCL)
- Claim --> ClaimSuccess: File created
- Claim --> ClaimFail: File exists
-(concurrent hook won)
-
- ClaimSuccess --> Sync: syncSessionFileFromClaims()
- Sync --> UpdateEnv: Update env var + session file
-
- ClaimFail --> Skip
-
- state "Fallback Strategies" as fallback {
- File: "file" strategy
-(primary: atomic claims)
- EnvOnly: "env-var" strategy
-(no session ID)
- Memory: "memory-only" strategy
-(single invocation)
- Disabled: "disabled" strategy
-(VERCEL_PLUGIN_HOOK_DEDUP=off)
- }
-
- Note right of fallback: Strategies degrade gracefully:
-file → env-var → memory-only → disabled
-```
-
-### Three State Sources
-
-1. **Environment variable** (`VERCEL_PLUGIN_SEEN_SKILLS`): Comma-delimited list of skill slugs. Fast to read, but can drift if multiple hooks run concurrently.
-
-2. **Claim directory** (`{tmpdir}/vercel-plugin-{sessionId}-seen-skills.d/`): Contains one empty file per claimed skill. Files are created atomically with `openSync(path, "wx")` (O_EXCL flag), which provides a filesystem-level mutex. If two hooks try to claim the same skill simultaneously, only one succeeds.
-
-3. **Session file** (`{tmpdir}/vercel-plugin-{sessionId}-seen-skills.txt`): A comma-delimited snapshot periodically synced from the claim directory. Acts as a checkpoint.
-
-### Merge Strategy
-
-**Source**: `patterns.mts:mergeSeenSkillStates()`
-
-All three sources are unioned into a single `Set`. A skill is considered "seen" if it appears in **any** of the three sources.
-
-```
-mergedSeen = union(envVar, claimDir, sessionFile)
-```
-
-### Fallback Strategies
-
-The dedup system degrades gracefully:
-
-| Strategy | When | Behavior |
-|----------|------|----------|
-| **file** | Session ID available, filesystem writable | Full atomic claims + session file + env var |
-| **env-var** | No session ID, or claim dir unavailable | Env var only (no cross-process safety) |
-| **memory-only** | No env var support | In-memory set for single invocation |
-| **disabled** | `VERCEL_PLUGIN_HOOK_DEDUP=off` | No dedup — every match is injected |
-
-### Scope-Aware Dedup for Subagents
-
-**Source**: `patterns.mts:mergeScopedSeenSkillStates()`
-
-When running inside a subagent (identified by `scopeId` / `agent_id`):
-- The parent's **env var** is excluded from the merge, because it carries the parent's seen-skills and would incorrectly suppress skills the subagent hasn't seen
-- Only the **session file** and **claim directory** are merged
-- Claims are scoped to the subagent's scope ID
-
-This ensures subagents get fresh skill injection while still deduplicating within their own scope.
-
----
-
-## Budget Enforcement
-
-Budget enforcement prevents the plugin from flooding Claude's context window with too much skill content.
-
-### PreToolUse Budget
-
-| Parameter | Default | Env Override |
-|-----------|---------|-------------|
-| Byte budget | **18,000 bytes** (18 KB) | `VERCEL_PLUGIN_INJECTION_BUDGET` |
-| Max skills | **5** | — |
-
-Rules:
-- The **first skill always gets its full body**, regardless of budget
-- Subsequent skills must fit within the remaining byte budget
-- Skills are measured as UTF-8 bytes after wrapping in comment markers
-
-### UserPromptSubmit Budget
-
-| Parameter | Default | Env Override |
-|-----------|---------|-------------|
-| Byte budget | **8,000 bytes** (8 KB) | `VERCEL_PLUGIN_PROMPT_INJECTION_BUDGET` |
-| Max skills | **2** | — |
-
-The smaller budget reflects that prompt-based injection is speculative — the user hasn't started working with specific files yet.
-
-### Summary Fallback
-
-When a skill's full body would exceed the remaining budget, the hook checks if a `summary` field exists in the frontmatter:
-
-```yaml
-summary: "Brief guidance for this skill (injected when budget exceeded)"
-```
-
-If the summary fits within budget, it's injected with a `mode:summary` marker:
-```html
-<!-- vercel-plugin:skill:<name> mode:summary -->
-Brief guidance for this skill...
-<!-- /vercel-plugin:skill:<name> -->
-```
-
-If neither the full body nor summary fits, the skill is dropped with a `droppedByBudget` classification.
-
----
-
-## Special-Case Triggers
-
-These triggers operate alongside the normal pattern-matching pipeline and have their own dedup/counter mechanisms.
-
-### TSX Review Trigger
-
-**Source**: `pretooluse-skill-inject.mts:checkTsxReviewTrigger()`
-
-After a configurable number of `.tsx` file edits (default: 3), the `react-best-practices` skill is injected with a massive priority boost (+40).
-
-| Parameter | Default | Env Override |
-|-----------|---------|-------------|
-| Edit threshold | 3 | `VERCEL_PLUGIN_REVIEW_THRESHOLD` |
-| Priority boost | +40 | — |
-
-**Behavior**:
-1. Every Edit/Write on a `.tsx` file increments `VERCEL_PLUGIN_TSX_EDIT_COUNT`
-2. When the count reaches the threshold, the trigger fires
-3. The counter resets after injection, allowing re-injection after another N edits
-4. This trigger **bypasses** the normal SEEN_SKILLS dedup — the counter is the sole gate
-
-### Dev Server Detection
-
-**Source**: `pretooluse-skill-inject.mts:checkDevServerVerify()`
-
-When Claude runs a dev server command (e.g., `next dev`, `npm run dev`, `vite`), the `agent-browser-verify` skill is injected to encourage browser-based verification.
-
-Detected patterns:
-```
-next dev, npm run dev, pnpm dev, bun dev, bun run dev,
-yarn dev, vite dev, vite, nuxt dev, vercel dev, astro dev
-```
-
-| Parameter | Value |
-|-----------|-------|
-| Priority boost | +45 |
-| Max iterations | 2 per session |
-| Loop guard env | `VERCEL_PLUGIN_DEV_VERIFY_COUNT` |
-| Companion skills | `verification` (co-injected) |
-
-**Graceful degradation**: If `agent-browser` is not installed (`VERCEL_PLUGIN_AGENT_BROWSER_AVAILABLE=0`), the hook injects an unavailability notice instead, suggesting the user install it.
-
-### Vercel Env Help
-
-**Source**: `pretooluse-skill-inject.mts:checkVercelEnvHelp()`
-
-One-time injection of a quick-reference guide when Claude runs `vercel env add`, `vercel env update`, or `vercel env pull`. The guide clarifies common pitfalls (e.g., "do NOT pass NAME=value as a positional argument").
-
-This uses the standard dedup system with key `vercel-env-help` — once shown, it won't appear again in the session.
-
-### Investigation Companion Selection
-
-**Source**: `user-prompt-submit-skill-inject.mts:selectInvestigationCompanion()`
-
-When `investigation-mode` is selected via prompt signals, the second skill slot is reserved for the best "companion" skill from a prioritized list:
-
-1. `workflow` (highest priority)
-2. `agent-browser-verify`
-3. `vercel-cli`
-
-The companion must have independently matched (score >= its minScore). This ensures debugging prompts get both the investigation methodology and a relevant tooling skill.
-
----
-
-## User Story: Why Didn't My Skill Inject?
-
-**Scenario**: You authored a new skill called `my-feature` with `pathPatterns: ["**/my-feature.config.ts"]`, but when Claude reads `src/my-feature.config.ts`, the skill doesn't appear.
-
-### Step 1: Use `vercel-plugin explain`
-
-```bash
-vercel-plugin explain src/my-feature.config.ts
-```
-
-This shows which skills match the file path, with a priority breakdown:
-
-```
-Matches for "src/my-feature.config.ts":
- ✓ my-feature priority=5 pattern="**/my-feature.config.ts" match=suffix
-
-Budget simulation (18000 bytes, max 5 skills):
- 1. my-feature body=2340B cumulative=2340B ✓ within budget
-```
-
-If your skill doesn't appear here, the pattern doesn't match. Check:
-- Is the glob correct? Run `bun run build:manifest` to recompile.
-- Is the pattern in `pathPatterns` (not `bashPatterns`)?
-
-### Step 2: Check dedup state
-
-```bash
-echo $VERCEL_PLUGIN_SEEN_SKILLS
-```
-
-If `my-feature` is already in the list, it was injected earlier in the session and dedup is filtering it out. This is expected behavior — skills inject once per session.
-
-To test without dedup:
-```bash
-VERCEL_PLUGIN_HOOK_DEDUP=off vercel-plugin explain src/my-feature.config.ts
-```
-
-### Step 3: Check budget
-
-If your skill appears in `explain` output but the budget simulation shows it as "dropped by budget", the preceding skills consumed too much of the 18 KB budget. Options:
-- Increase the budget: `VERCEL_PLUGIN_INJECTION_BUDGET=25000`
-- Reduce your skill's body size
-- Add a `summary` field for budget-constrained injection
-
-### Step 4: Enable debug logging
-
-```bash
-VERCEL_PLUGIN_LOG_LEVEL=debug
-```
-
-This produces structured JSON logs on stderr showing every pipeline stage:
-- `input-parsed` — what the hook received
-- `matches-found` — which skills matched and why
-- `dedup-filtered` — which skills were filtered out
-- `skill-injected` / `skill-dropped` — final injection decisions
-
-For maximum detail, use `VERCEL_PLUGIN_LOG_LEVEL=trace` to see every pattern evaluation.
-
-### Step 5: Check the manifest
-
-```bash
-cat generated/skill-manifest.json | jq '.skills["my-feature"]'
-```
-
-Verify:
-- `pathPatterns` contains your glob
-- `pathRegexSources` contains the compiled regex
-- The regex actually matches your file path
-
-If the manifest is stale, rebuild:
-```bash
-bun run build:manifest
-```
-
-### Common Gotchas
-
-| Symptom | Cause | Fix |
-|---------|-------|-----|
-| Skill never matches | Glob doesn't cover the path | Test with `vercel-plugin explain <file>` |
-| Skill matched but not injected | Already in `SEEN_SKILLS` | Expected — dedup prevents re-injection |
-| Skill matched but "dropped by budget" | Too many higher-priority skills | Add a `summary` fallback or increase budget |
-| Skill matches locally but not in session | Stale manifest | Run `bun run build:manifest` |
-| Prompt-based skill not matching | Phrases don't match after normalization | Check stemming (e.g., "deploying" stems to "deploy") |
-| Skill injected in parent but not subagent | Scope-aware dedup working correctly | Subagents get fresh dedup state |
diff --git a/docs/03-skill-authoring.md b/docs/03-skill-authoring.md
deleted file mode 100644
index d6ce37a..0000000
--- a/docs/03-skill-authoring.md
+++ /dev/null
@@ -1,548 +0,0 @@
-# 3. Skill Authoring Guide
-
-> **Audience**: Skill authors — anyone adding new skills or extending existing ones.
-
-This guide walks you through creating a new skill from scratch, explains every frontmatter field, documents the template include engine, and covers the custom YAML parser's non-standard behavior.
-
----
-
-## Table of Contents
-
-1. [User Story: Adding a New Vercel Feature Skill](#user-story-adding-a-new-vercel-feature-skill)
-2. [Step-by-Step: Create a Skill from Scratch](#step-by-step-create-a-skill-from-scratch)
-3. [SKILL.md Frontmatter Schema](#skillmd-frontmatter-schema)
- - [Top-Level Fields](#top-level-fields)
- - [metadata Object](#metadata-object)
- - [promptSignals Object](#promptsignals-object)
- - [validate Array](#validate-array)
- - [retrieval Object](#retrieval-object)
-4. [Pattern Matching Reference](#pattern-matching-reference)
- - [pathPatterns (Globs)](#pathpatterns-globs)
- - [bashPatterns (Regex)](#bashpatterns-regex)
- - [importPatterns (Package Matchers)](#importpatterns-package-matchers)
-5. [Prompt Signal Scoring](#prompt-signal-scoring)
-6. [Validation Rules](#validation-rules)
-7. [Template Include Engine](#template-include-engine)
- - [Section Includes](#section-includes)
- - [Frontmatter Includes](#frontmatter-includes)
- - [Build Workflow](#build-workflow)
-8. [Custom YAML Parser Gotchas](#custom-yaml-parser-gotchas)
-9. [Build & Test Workflow](#build--test-workflow)
-10. [Cross-References](#cross-references)
-
----
-
-## User Story: Adding a New Vercel Feature Skill
-
-> **Scenario**: You're a Vercel engineer. Vercel just shipped a new feature called "Edge Config" and you want Claude to automatically inject best-practice guidance whenever a developer touches Edge Config files or asks about it in a prompt.
-
-Here's the journey:
-
-```mermaid
-flowchart LR
- A["1. Create
skills/edge-config/SKILL.md"] --> B["2. Write frontmatter
(patterns, signals, validation)"]
- B --> C["3. Write body
(guidance markdown)"]
- C --> D["4. Build manifest
bun run build:manifest"]
- D --> E["5. Validate
bun run validate"]
- E --> F["6. Test with explain
bun run explain -- --file edge-config.json"]
- F --> G["7. Commit & ship"]
-```
-
-**What happens at runtime:**
-
-1. **SessionStart**: The profiler scans `package.json` — if `@vercel/edge-config` is a dependency, your skill gets a +5 priority boost via `VERCEL_PLUGIN_LIKELY_SKILLS`.
-2. **PreToolUse**: When Claude reads `edge-config.json` or runs `vercel env pull`, your skill's `pathPatterns` and `bashPatterns` match → it's ranked, deduped, and injected within the 18KB budget.
-3. **UserPromptSubmit**: When the developer types "how do I set up edge config?", your `promptSignals.phrases` score +6 → the skill injects within the 8KB budget.
-4. **PostToolUse**: When Claude writes to a matched file, your `validate` rules run and flag any antipatterns.
-
----
-
-## Step-by-Step: Create a Skill from Scratch
-
-### Step 1: Create the skill directory
-
-```bash
-mkdir -p skills/edge-config
-```
-
-Skills are keyed by **directory name**, not the frontmatter `name` field. The directory name is the canonical identifier used everywhere (dedup, manifest, env vars, logs).
-
-### Step 2: Create `SKILL.md` with frontmatter
-
-```bash
-touch skills/edge-config/SKILL.md
-```
-
-Start with this minimal skeleton:
-
-```markdown
----
-name: edge-config
-description: "Best practices for Vercel Edge Config — a low-latency global data store"
-summary: "Edge Config: use read() not get(), prefer JSON values"
-metadata:
- priority: 6
- pathPatterns:
- - "edge-config.*"
- - ".env*"
- bashPatterns:
- - "\\bedge.config\\b"
- importPatterns:
- - "@vercel/edge-config"
- promptSignals:
- phrases:
- - "edge config"
- - "edge-config"
- allOf:
- - ["vercel", "config", "edge"]
- anyOf:
- - "low latency"
- - "feature flags"
- noneOf:
- - "next.config"
- minScore: 6
- validate:
- - pattern: "edgeConfig\\.get\\("
- message: "Use edgeConfig.read() instead of .get() — read() returns typed values"
- severity: error
- - pattern: "new EdgeConfig\\("
- message: "Import createClient from @vercel/edge-config instead of constructing directly"
- severity: warn
- skipIfFileContains: "createClient"
----
-
-# Edge Config
-
-## When to Use
-
-Edge Config is a global, low-latency data store...
-
-## API Patterns
-
-...your guidance here...
-```
-
-### Step 3: Build the manifest
-
-```bash
-bun run build:manifest
-```
-
-This reads all `skills/*/SKILL.md` files, extracts frontmatter, compiles glob patterns to regex, and writes `generated/skill-manifest.json`. The manifest is what hooks read at runtime for fast matching.
-
-### Step 4: Validate
-
-```bash
-bun run validate
-```
-
-This checks:
-- Frontmatter parses without errors
-- Required fields are present
-- Patterns are valid (globs compile, regexes parse)
-- Manifest is in sync with live skills
-
-### Step 5: Test with explain
-
-```bash
-# Test file path matching
-bun run scripts/explain.ts --file edge-config.json
-
-# Test bash command matching
-bun run scripts/explain.ts --bash "vercel edge-config ls"
-
-# Test with profiler boost simulation
-bun run scripts/explain.ts --file edge-config.json --likely-skills edge-config
-```
-
-The explain command mirrors the runtime matching logic exactly — it shows priority scores, match reasons, and whether your skill would be injected within the budget.
-
-### Step 6: Run tests
-
-```bash
-bun test
-```
-
-If you added new patterns, consider adding a test case in `tests/` to cover your skill's matching behavior.
-
-### Step 7: Build everything and commit
-
-```bash
-bun run build # hooks + manifest + from-skills
-bun test # verify nothing broke
-```
-
----
-
-## SKILL.md Frontmatter Schema
-
-Every `SKILL.md` begins with a YAML frontmatter block between `---` delimiters. Below is the complete schema.
-
-### Top-Level Fields
-
-| Field | Type | Required | Description |
-|-------|------|----------|-------------|
-| `name` | `string` | No | Human-readable name. Falls back to directory name if omitted. |
-| `description` | `string` | Yes | One-line description of the skill's purpose. |
-| `summary` | `string` | Recommended | Brief fallback text (injected when full body exceeds budget). Keep under ~200 chars. |
-| `metadata` | `object` | Yes | Contains all matching, scoring, and validation configuration. |
-
-### metadata Object
-
-| Field | Type | Default | Description |
-|-------|------|---------|-------------|
-| `priority` | `number` | `5` | Injection priority (range 4–8). Higher = injected first. |
-| `pathPatterns` | `string[]` | `[]` | Glob patterns for file path matching (see [Pattern Matching](#pathpatterns-globs)). |
-| `bashPatterns` | `string[]` | `[]` | Regex patterns for bash command matching. |
-| `importPatterns` | `string[]` | `[]` | Package name patterns for import/require matching. |
-| `promptSignals` | `object` | — | Prompt-based scoring configuration (see [promptSignals](#promptsignals-object)). |
-| `validate` | `object[]` | `[]` | PostToolUse validation rules (see [validate](#validate-array)). |
-| `retrieval` | `object` | — | Discovery metadata for search/retrieval systems (see [retrieval](#retrieval-object)). |
-
-### promptSignals Object
-
-Controls how the UserPromptSubmit hook scores user prompts against this skill.
-
-| Field | Type | Default | Description |
-|-------|------|---------|-------------|
-| `phrases` | `string[]` | `[]` | Exact substring matches (case-insensitive). Each hit scores **+6**. |
-| `allOf` | `string[][]` | `[]` | Groups of terms that must **all** appear. Each complete group scores **+4**. |
-| `anyOf` | `string[]` | `[]` | Optional terms. Each hit scores **+1**, capped at **+2 total**. |
-| `noneOf` | `string[]` | `[]` | Suppression terms. Any match sets score to **-Infinity** (hard suppress). |
-| `minScore` | `number` | `6` | Minimum score threshold for injection. |
-
-**Scoring example**: If a user types "I want to add the ai sdk for streaming", and the skill has:
-- `phrases: ["ai sdk"]` → +6 (substring match)
-- `allOf: [["streaming", "generation"]]` → +0 (only "streaming" matched, not "generation")
-- `anyOf: ["streaming"]` → +1
-
-Total: **7** ≥ `minScore: 6` → skill injects.
-
-### validate Array
-
-Each entry defines a PostToolUse validation rule that runs when Claude writes or edits a file matched by the skill.
-
-| Field | Type | Required | Description |
-|-------|------|----------|-------------|
-| `pattern` | `string` | Yes | Regex pattern to search for in the written file content. |
-| `message` | `string` | Yes | Error/warning message shown to Claude when pattern matches. |
-| `severity` | `"error" \| "warn"` | Yes | `error` = must fix before proceeding. `warn` = advisory. |
-| `skipIfFileContains` | `string` | No | Regex — if the file also matches this pattern, skip the rule. |
-
-**Example**:
-
-```yaml
-validate:
- - pattern: "from\\s+['\"]openai['\"]"
- message: "Use @ai-sdk/openai provider instead of importing openai directly"
- severity: error
- skipIfFileContains: "@ai-sdk/openai"
-```
-
-This fires when Claude writes `import { OpenAI } from "openai"` but **not** if the file already contains `@ai-sdk/openai`.
-
-### retrieval Object
-
-Optional metadata for discovery systems (search, RAG, skill recommendation).
-
-| Field | Type | Description |
-|-------|------|-------------|
-| `aliases` | `string[]` | Alternative names (e.g., `["vercel ai", "ai library"]`). |
-| `intents` | `string[]` | User intents this skill addresses (e.g., `["add ai to app"]`). |
-| `entities` | `string[]` | Key API symbols/functions (e.g., `["useChat", "streamText"]`). |
-
----
-
-## Pattern Matching Reference
-
-### pathPatterns (Globs)
-
-File path globs are compiled to regex at build time via `globToRegex()`. Supported syntax:
-
-| Pattern | Matches | Example |
-|---------|---------|---------|
-| `*` | Any characters except `/` | `*.ts` → `foo.ts`, not `dir/foo.ts` |
-| `**` | Any path depth (including zero) | `app/**/*.tsx` → `app/page.tsx`, `app/a/b/page.tsx` |
-| `?` | Single character | `?.ts` → `a.ts`, not `ab.ts` |
-| `{a,b}` | Alternation | `*.{ts,tsx}` → `foo.ts`, `foo.tsx` |
-| `[abc]` | Character class | `[._]env` → `.env`, `_env` |
-
-**Matching strategy at runtime** (PreToolUse tries in order):
-1. **Full path**: The glob matches the complete relative file path
-2. **Basename**: The glob matches just the filename
-3. **Suffix**: The path ends with the glob pattern
-
-### bashPatterns (Regex)
-
-Bash patterns are **JavaScript regular expressions** tested against the full bash command string. Common patterns:
-
-```yaml
-bashPatterns:
- - "\\bnext\\s+(dev|build|start)\\b" # next dev, next build, next start
- - "npm run (dev|build)" # npm scripts
- - "vercel\\s+deploy" # vercel deploy command
-```
-
-**Tips**:
-- Use `\\b` for word boundaries (YAML requires escaping the backslash)
-- Use alternation `(a|b)` for command variants
-- Patterns are case-sensitive by default
-
-### importPatterns (Package Matchers)
-
-Import patterns match against `import`/`require` statements in file content. They support wildcard scoping:
-
-```yaml
-importPatterns:
- - "ai" # Exact: import { x } from "ai"
- - "@ai-sdk/*" # Scoped wildcard: @ai-sdk/openai, @ai-sdk/anthropic
- - "@vercel/edge-config" # Exact scoped package
-```
-
-At build time, these are compiled into regex patterns with appropriate flags by `importPatternToRegex()`.
-
----
-
-## Prompt Signal Scoring
-
-The UserPromptSubmit hook normalizes the user's prompt before scoring:
-
-1. **Lowercased**: All comparisons are case-insensitive
-2. **Contraction expansion**: "don't" → "do not", "isn't" → "is not", etc.
-3. **Whitespace normalization**: Multiple spaces collapsed to single space
-
-Then each skill's `promptSignals` are evaluated:
-
-```mermaid
-flowchart TD
- PROMPT["User prompt text"] --> NORM["Normalize:
lowercase, expand contractions,
collapse whitespace"]
- NORM --> NONE{"noneOf
matches?"}
- NONE -->|Yes| SUPPRESS["-∞ → skip"]
- NONE -->|No| PHRASES["phrases: +6 each"]
- PHRASES --> ALLOF["allOf: +4 per complete group"]
- ALLOF --> ANYOF["anyOf: +1 each, max +2"]
- ANYOF --> LEXICAL["Lexical fallback:
+2 if skill terms overlap"]
- LEXICAL --> TOTAL{"total ≥ minScore?"}
- TOTAL -->|Yes| INJECT["Inject skill"]
- TOTAL -->|No| SKIP["Skip"]
-```
-
-**Lexical fallback scoring**: When phrase/allOf/anyOf scoring yields a low result, the system tokenizes both the prompt and skill metadata (description, phrases, entities) and checks for significant term overlap. This adds up to +2 and catches prompts that are topically relevant but don't match exact phrases.
-
----
-
-## Validation Rules
-
-Validation rules run in the PostToolUse hook after Claude writes or edits a file. The hook:
-
-1. Matches the written file path against all skills' `pathPatterns`
-2. For each matched skill, runs its `validate` rules against the file content
-3. Returns fix instructions as `additionalContext` for any violations
-
-**Rule execution flow**:
-
-```mermaid
-flowchart TD
- WRITE["Claude writes/edits file"] --> MATCH["Match file path → skills"]
- MATCH --> LOOP["For each matched skill"]
- LOOP --> RULE["For each validate rule"]
- RULE --> SKIP{"skipIfFileContains
matches?"}
- SKIP -->|Yes| NEXT["Skip rule"]
- SKIP -->|No| TEST{"pattern
matches file?"}
- TEST -->|Yes| REPORT["Report: severity + message"]
- TEST -->|No| NEXT
- NEXT --> RULE
-```
-
-**Best practices for validation rules**:
-- Use `severity: error` sparingly — only for patterns that will definitely break functionality
-- Use `severity: warn` for style preferences or potential issues
-- Use `skipIfFileContains` to avoid false positives (e.g., skip "use X" if X is already imported)
-- Keep `message` actionable — tell Claude what to do, not just what's wrong
-- Remember patterns are **regex**, so escape special characters (`.` → `\\.`, `(` → `\\(`)
-
----
-
-## Template Include Engine
-
-Skills are the single source of truth for domain knowledge. Agents and commands pull content from skills at build time via `.md.tmpl` templates, so they stay in sync without duplicating prose.
-
-### Section Includes
-
-Extract a markdown section by heading:
-
-```
-{{include:skill:<skill-name>:<heading>}}
-```
-
-**Behavior**: Finds the heading (case-insensitive) in the skill's markdown body and extracts everything from that heading to the next heading of equal or higher level.
-
-**Example**: Given `skills/nextjs/SKILL.md` contains:
-
-```markdown
-## App Router
-
-Use the App Router for all new projects...
-
-### File Conventions
-
-- `page.tsx` — route entry point
-- `layout.tsx` — shared layout
-
-## Pages Router
-
-Legacy approach...
-```
-
-Then `{{include:skill:nextjs:App Router}}` extracts:
-
-```markdown
-## App Router
-
-Use the App Router for all new projects...
-
-### File Conventions
-
-- `page.tsx` — route entry point
-- `layout.tsx` — shared layout
-```
-
-(Stops at `## Pages Router` because it's an equal-level heading.)
-
-### Frontmatter Includes
-
-Extract a frontmatter field value:
-
-```
-{{include:skill:<skill-name>:frontmatter:<field-path>}}
-```
-
-Supports dotted paths for nested fields:
-
-```
-{{include:skill:nextjs:frontmatter:metadata.priority}} → "5"
-{{include:skill:ai-sdk:frontmatter:description}} → "Best practices for Vercel AI SDK..."
-```
-
-### Build Workflow
-
-```bash
-# Compile all .md.tmpl templates → .md files
-bun run build:from-skills
-
-# Check if generated .md files are up-to-date (CI mode)
-bun run build:from-skills:check
-```
-
-**Current templates** (8 files):
-
-| Template | Output |
-|----------|--------|
-| `agents/ai-architect.md.tmpl` | `agents/ai-architect.md` |
-| `agents/deployment-expert.md.tmpl` | `agents/deployment-expert.md` |
-| `agents/performance-optimizer.md.tmpl` | `agents/performance-optimizer.md` |
-| `commands/bootstrap.md.tmpl` | `commands/bootstrap.md` |
-| `commands/deploy.md.tmpl` | `commands/deploy.md` |
-| `commands/env.md.tmpl` | `commands/env.md` |
-| `commands/marketplace.md.tmpl` | `commands/marketplace.md` |
-| `commands/status.md.tmpl` | `commands/status.md` |
-
-**Diagnostics**: The build reports these codes when includes fail:
-- `SKILL_NOT_FOUND` — no `skills/<skill>/SKILL.md` exists
-- `HEADING_NOT_FOUND` — heading doesn't exist in the skill body
-- `FRONTMATTER_NOT_FOUND` — field path doesn't exist in YAML
-- `STALE_OUTPUT` — generated `.md` is out of date
-
-**Dependency tracking**: `generated/build-from-skills.manifest.json` records which templates depend on which skills, enabling incremental builds and CI staleness checks.
-
----
-
-## Custom YAML Parser Gotchas
-
-The plugin uses a custom inline YAML parser (`parseSimpleYaml` in `skill-map-frontmatter.mjs`), **not** the standard `js-yaml` library. This parser has intentional non-standard behavior that skill authors must be aware of:
-
-### 1. Bare `null` → string `"null"`
-
-```yaml
-# Standard YAML: description is JavaScript null
-# This parser: description is the string "null"
-description: null
-```
-
-**Impact**: You never get JavaScript `null` from frontmatter — everything is a string or number.
-
-### 2. Bare `true`/`false` → strings
-
-```yaml
-# Standard YAML: enabled is boolean true
-# This parser: enabled is the string "true"
-enabled: true
-```
-
-**Impact**: Don't rely on boolean coercion. The build scripts and hooks handle this explicitly.
-
-### 3. Unclosed `[` → scalar string
-
-```yaml
-# Standard YAML: parse error
-# This parser: pathPatterns is the string "[app/**"
-pathPatterns: [app/**
-```
-
-**Impact**: Missing closing `]` won't cause an error — your pattern silently becomes a useless string. **Always close your brackets.**
-
-### 4. Tab indentation → explicit error
-
-```yaml
-# This causes a parse error:
-metadata:
-→priority: 5 # ← tab character
-```
-
-**Impact**: Use spaces only. The parser deliberately rejects tabs to avoid ambiguous indentation. This is the one case where the parser is **stricter** than standard YAML.
-
-### Summary table
-
-| Input | Standard YAML | This Parser |
-|-------|---------------|-------------|
-| `null` (bare) | `null` (JavaScript null) | `"null"` (string) |
-| `true` (bare) | `true` (boolean) | `"true"` (string) |
-| `false` (bare) | `false` (boolean) | `"false"` (string) |
-| `[unclosed` | Parse error | Scalar string `"[unclosed"` |
-| Tab indent | Usually accepted | **Parse error** |
-
----
-
-## Build & Test Workflow
-
-After creating or modifying a skill:
-
-```bash
-# 1. Build manifest (compiles frontmatter → JSON)
-bun run build:manifest
-
-# 2. Validate all skills
-bun run validate
-
-# 3. Test with explain CLI
-bun run scripts/explain.ts --file <path>
-
-# 4. Run full test suite
-bun test
-
-# 5. Build everything (hooks + manifest + templates)
-bun run build
-
-# 6. Run doctor to check for issues
-bun run doctor
-```
-
-The pre-commit hook automatically runs `build:hooks` when `.mts` files are staged, but you should manually run `build:manifest` when changing frontmatter.
-
----
-
-## Cross-References
-
-- **[Architecture Overview](./01-architecture-overview.md)** — System diagram, hook lifecycle, glossary
-- **[Injection Pipeline Deep-Dive](./02-injection-pipeline.md)** — How pattern matching, ranking, and budget enforcement work at runtime
-- **[Operations & Debugging](./04-operations-debugging.md)** — Environment variables, logging, CLI tools, troubleshooting
diff --git a/docs/04-operations-debugging.md b/docs/04-operations-debugging.md
deleted file mode 100644
index 57864d5..0000000
--- a/docs/04-operations-debugging.md
+++ /dev/null
@@ -1,417 +0,0 @@
-# 4. Operations & Debugging Guide
-
-> **Audience**: Maintainers and contributors — anyone debugging skill injection, diagnosing issues, or operating the plugin in development.
-
-This guide covers environment variable configuration, logging, the CLI diagnostic tools (`doctor` and `explain`), dedup troubleshooting, and session cleanup.
-
----
-
-## Table of Contents
-
-1. [Environment Variable Decision Tree](#environment-variable-decision-tree)
-2. [Log Levels](#log-levels)
- - [off (default)](#off-default)
- - [summary](#summary)
- - [debug](#debug)
- - [trace](#trace)
-3. [CLI: vercel-plugin explain](#cli-vercel-plugin-explain)
-4. [CLI: vercel-plugin doctor](#cli-vercel-plugin-doctor)
-5. [Dedup Troubleshooting](#dedup-troubleshooting)
- - [How Dedup Works](#how-dedup-works)
- - [Common Issues](#common-issues)
- - [Dedup Strategy Fallbacks](#dedup-strategy-fallbacks)
-6. [Session Cleanup](#session-cleanup)
-7. [User Story: Why Didn't My Skill Inject?](#user-story-why-didnt-my-skill-inject)
-8. [Cross-References](#cross-references)
-
----
-
-## Environment Variable Decision Tree
-
-Rather than a flat table, use this decision tree to find the right variable for your situation:
-
-```mermaid
-flowchart TD
- START["What are you trying to do?"] --> DEBUG{"Debugging
injection?"}
- START --> TUNE{"Tuning
behavior?"}
- START --> DISABLE{"Disabling
features?"}
-
- DEBUG -->|"See why skills matched"| LOG["Set VERCEL_PLUGIN_LOG_LEVEL=debug"]
- DEBUG -->|"See every pattern test"| TRACE["Set VERCEL_PLUGIN_LOG_LEVEL=trace"]
- DEBUG -->|"See which skills were seen"| SEEN["Read VERCEL_PLUGIN_SEEN_SKILLS"]
- DEBUG -->|"Check profiler results"| LIKELY["Read VERCEL_PLUGIN_LIKELY_SKILLS"]
- DEBUG -->|"Write audit trail"| AUDIT["Set VERCEL_PLUGIN_AUDIT_LOG_FILE=/path"]
-
- TUNE -->|"Change PreToolUse budget"| BUDGET["VERCEL_PLUGIN_INJECTION_BUDGET=18000"]
- TUNE -->|"Change prompt budget"| PBUD["VERCEL_PLUGIN_PROMPT_INJECTION_BUDGET=8000"]
- TUNE -->|"Change TSX review threshold"| TSX["VERCEL_PLUGIN_REVIEW_THRESHOLD=3"]
-
- DISABLE -->|"Turn off dedup"| DEDUP_OFF["VERCEL_PLUGIN_HOOK_DEDUP=off"]
- DISABLE -->|"Turn off audit log"| AUDIT_OFF["VERCEL_PLUGIN_AUDIT_LOG_FILE=off"]
-```
-
-### Complete Variable Reference
-
-| Variable | Default | Category | Description |
-|----------|---------|----------|-------------|
-| `VERCEL_PLUGIN_LOG_LEVEL` | `off` | Debugging | Log verbosity: `off` / `summary` / `debug` / `trace` |
-| `VERCEL_PLUGIN_DEBUG` | — | Debugging | Legacy: `1` maps to `debug` level |
-| `VERCEL_PLUGIN_HOOK_DEBUG` | — | Debugging | Legacy: `1` maps to `debug` level |
-| `VERCEL_PLUGIN_AUDIT_LOG_FILE` | — | Debugging | Path for audit log, or `off` to disable |
-| `VERCEL_PLUGIN_SEEN_SKILLS` | `""` | State | Comma-delimited already-injected skills (managed by hooks) |
-| `VERCEL_PLUGIN_LIKELY_SKILLS` | — | State | Profiler-detected skills (comma-delimited, +5 boost) |
-| `VERCEL_PLUGIN_GREENFIELD` | — | State | `true` if project is empty (set by profiler) |
-| `VERCEL_PLUGIN_TSX_EDIT_COUNT` | `0` | State | Current `.tsx` edit count (tracked by PreToolUse) |
-| `VERCEL_PLUGIN_HOOK_DEDUP` | — | Control | `off` to disable dedup entirely |
-| `VERCEL_PLUGIN_INJECTION_BUDGET` | `18000` | Tuning | PreToolUse byte budget |
-| `VERCEL_PLUGIN_PROMPT_INJECTION_BUDGET` | `8000` | Tuning | UserPromptSubmit byte budget |
-| `VERCEL_PLUGIN_REVIEW_THRESHOLD` | `3` | Tuning | TSX edits before react-best-practices injection |
-
----
-
-## Log Levels
-
-Logging is controlled by `VERCEL_PLUGIN_LOG_LEVEL` and outputs structured JSON to **stderr**. Each log entry includes an `invocationId`, event type, and timestamp.
-
-### off (default)
-
-No log output. Hooks run silently. Use this in production.
-
-### summary
-
-Shows high-level injection decisions — what was injected and why:
-
-```json
-{
- "invocationId": "abc123",
- "event": "complete",
- "hook": "pretooluse-skill-inject",
- "counts": {
- "matched": 4,
- "injected": 2,
- "deduped": 1,
- "capped": 1,
- "budgetDropped": 0
- },
- "injected": ["nextjs", "typescript"],
- "ts": 1710000000000
-}
-```
-
-**When to use**: Quick check on what's being injected without noise.
-
-### debug
-
-Everything in `summary` plus:
-- Skill routing decisions (why each skill was included/excluded)
-- Priority calculations (base + vercel.json routing + profiler boost)
-- Dedup state (which skills were already seen)
-- Budget enforcement details (byte sizes, summary fallbacks)
-- Pattern match details (which pattern matched which input)
-
-```json
-{
- "invocationId": "abc123",
- "event": "decision",
- "skill": "nextjs",
- "action": "inject",
- "reason": "pathPattern match: app/page.tsx",
- "effectivePriority": 10,
- "breakdown": { "base": 5, "profilerBoost": 5, "vercelJsonRouting": 0 },
- "bytes": 4200,
- "ts": 1710000000000
-}
-```
-
-**When to use**: Understanding why a specific skill did or didn't inject.
-
-### trace
-
-Everything in `debug` plus:
-- Every pattern test (including non-matches)
-- YAML parsing steps
-- File I/O operations
-- Dedup claim operations
-- Budget calculations per skill
-
-**When to use**: Diagnosing parser bugs, pattern compilation issues, or performance bottlenecks. Very verbose.
-
----
-
-## CLI: vercel-plugin explain
-
-The `explain` command mirrors runtime matching logic exactly, showing which skills would match a target and whether they'd be injected within the budget.
-
-### Usage
-
-```bash
-vercel-plugin explain <target> [options]
-
-# Or via bun:
-bun run scripts/explain.ts <target> [options]
-```
-
-### Options
-
-| Flag | Description |
-|------|-------------|
-| `--file <path>` | Explicitly treat target as a file path |
-| `--bash <command>` | Explicitly treat target as a bash command |
-| `--json` | Machine-readable JSON output |
-| `--project <dir>` | Override plugin root directory |
-| `--likely-skills s1,s2` | Simulate profiler boost for these skills |
-| `--budget <bytes>` | Override injection budget (default: 12000) |
-
-### Example: File path matching
-
-```
-$ vercel-plugin explain app/api/chat/route.ts
-
- Skill Matches for: app/api/chat/route.ts
- (detected as: file path)
-
- ┌─────────┬──────────┬───────────┬────────────────────────────┬───────────┐
- │ Skill │ Priority │ Effective │ Matched Pattern │ Injection │
- ├─────────┼──────────┼───────────┼────────────────────────────┼───────────┤
- │ ai-sdk │ 8 │ 8 │ app/api/chat/** (file:full)│ full │
- │ nextjs │ 5 │ 5 │ app/** (file:full) │ full │
- │ ts-node │ 4 │ 4 │ **/*.ts (file:full) │ dropped │
- └─────────┴──────────┴───────────┴────────────────────────────┴───────────┘
-
- Budget: 2 of 3 skills injected (8,400 / 12,000 bytes)
- Dropped: ts-node (capped at 3 max skills)
-```
-
-### Example: Bash command matching
-
-```
-$ vercel-plugin explain --bash "next dev --turbo"
-
- Skill Matches for: next dev --turbo
- (detected as: bash command)
-
- ┌────────┬──────────┬───────────┬───────────────────────────────┬───────────┐
- │ Skill │ Priority │ Effective │ Matched Pattern │ Injection │
- ├────────┼──────────┼───────────┼───────────────────────────────┼───────────┤
- │ nextjs │ 5 │ 5 │ \bnext\s+(dev|build) (bash) │ full │
- └────────┴──────────┴───────────┴───────────────────────────────┴───────────┘
-
- Budget: 1 of 3 skills injected (4,200 / 12,000 bytes)
-```
-
-### Example: With profiler boost
-
-```
-$ vercel-plugin explain app/page.tsx --likely-skills nextjs
-
- ┌────────┬──────────┬───────────┬──────────────────┬───────────┐
- │ Skill │ Priority │ Effective │ Matched Pattern │ Injection │
- ├────────┼──────────┼───────────┼──────────────────┼───────────┤
- │ nextjs │ 5 │ 10 │ app/** (file) │ full │
- └────────┴──────────┴───────────┴──────────────────┴───────────┘
-
- Priority breakdown for nextjs: base(5) + profiler(+5) = 10
-```
-
----
-
-## CLI: vercel-plugin doctor
-
-The `doctor` command runs self-diagnosis checks on the plugin installation.
-
-### Usage
-
-```bash
-vercel-plugin doctor [options]
-
-# Or via bun:
-bun run doctor
-```
-
-### Options
-
-| Flag | Description |
-|------|-------------|
-| `--json` | Machine-readable JSON output |
-| `--project <dir>` | Override plugin root directory |
-
-### Checks Performed
-
-| Check | What It Does | Failure Means |
-|-------|-------------|---------------|
-| **Skill validation** | Parses all `SKILL.md` frontmatter | Malformed YAML, missing required fields |
-| **Build diagnostics** | Loads and validates skill map | Invalid patterns, deprecated fields |
-| **Manifest parity** | Compares live scan vs `skill-manifest.json` | Manifest is out of date — run `bun run build:manifest` |
-| **Hook timeout risk** | Warns if skill/pattern count is high | >50 skills or >200 patterns may hit 5s timeout |
-| **Dedup strategy** | Validates dedup env var format | Malformed `VERCEL_PLUGIN_SEEN_SKILLS` |
-| **Template staleness** | Checks `.md.tmpl` mtime vs `.md` mtime | Run `bun run build:from-skills` to regenerate |
-| **Subagent hooks** | Validates SubagentStart/Stop registration | Missing or misconfigured subagent hooks |
-
-### Example Output
-
-```
-$ vercel-plugin doctor
-
- vercel-plugin doctor
- ─────────────────────────────
-
- ✓ 43 skills loaded, 0 errors
- ✓ Manifest matches live scan (43 skills)
- ✓ 187 total patterns (under 200 threshold)
- ✓ Dedup strategy: env-var (validated)
- ✓ 8 templates up-to-date
- ✓ Subagent hooks registered
-
- ⚠ Warnings:
- - skills/legacy-api: DEPRECATED_FIELD — filePattern is deprecated, use pathPatterns
- - Manifest drift: skills/new-feature missing from manifest
- Hint: Run `bun run build:manifest` to regenerate
-
- Summary: 0 errors, 2 warnings
-```
-
----
-
-## Dedup Troubleshooting
-
-### How Dedup Works
-
-The dedup system prevents the same skill from being injected twice in a Claude session. It uses three coordinated state sources:
-
-```mermaid
-stateDiagram-v2
- [*] --> SessionStart: session begins
-
- state SessionStart {
- [*] --> InitEnvVar: VERCEL_PLUGIN_SEEN_SKILLS=""
- InitEnvVar --> CreateClaimDir: mkdir <tmpdir>/vercel-plugin-<session-id>-seen-skills.d/
- }
-
- SessionStart --> HookInvocation: PreToolUse or UserPromptSubmit fires
-
- state HookInvocation {
- [*] --> MergeState: mergeSeenSkillStates()
- MergeState --> CheckClaim: skill already claimed?
-
- state MergeState {
- [*] --> ReadEnvVar: VERCEL_PLUGIN_SEEN_SKILLS
- ReadEnvVar --> ReadClaimDir: claim dir files
- ReadClaimDir --> ReadSessionFile: session .txt file
- ReadSessionFile --> Union: union all three
- }
-
- CheckClaim -->|No| AtomicClaim: openSync(path, "wx") O_EXCL
- CheckClaim -->|Yes| Skip: skill already injected
- AtomicClaim --> UpdateEnvVar: append to VERCEL_PLUGIN_SEEN_SKILLS
- AtomicClaim --> SyncSessionFile: write session .txt
- }
-
- HookInvocation --> SessionEnd: session ends
-
- state SessionEnd {
- [*] --> Cleanup: delete claim dir + session file
- }
-```
-
-### Common Issues
-
-**Skill injecting twice (dedup not working)**:
-1. Check `VERCEL_PLUGIN_SEEN_SKILLS` — is it being set? Run with `VERCEL_PLUGIN_LOG_LEVEL=debug` to see dedup state.
-2. Check the claim directory exists: `ls /tmp/vercel-plugin-*-seen-skills.d/`
-3. Check for stale claim dirs from crashed sessions: `ls -la /tmp/vercel-plugin-*`
-4. Verify `session-start-seen-skills.mjs` ran (check SessionStart in logs)
-
-**Skill not injecting (dedup too aggressive)**:
-1. Check if the skill is in `VERCEL_PLUGIN_SEEN_SKILLS` — it may have been injected earlier in the session
-2. Check the claim dir for an existing claim file for that skill
-3. Run `VERCEL_PLUGIN_HOOK_DEDUP=off` to temporarily disable dedup and confirm the skill matches
-
-**Stale temp files after crash**:
-1. `session-end-cleanup.mjs` normally handles this, but crashes can leave orphans
-2. Safe to manually delete: `rm -rf /tmp/vercel-plugin-*`
-
-### Dedup Strategy Fallbacks
-
-The dedup system tries strategies in order, falling back on failure:
-
-| Strategy | How | When |
-|----------|-----|------|
-| **file** (preferred) | Atomic file claims via `O_EXCL` | Default — works when `/tmp` is writable |
-| **env-var** | Reads/writes `VERCEL_PLUGIN_SEEN_SKILLS` | Fallback when claim dir fails |
-| **memory-only** | In-process Set | Single hook invocation (no cross-hook persistence) |
-| **disabled** | No dedup | `VERCEL_PLUGIN_HOOK_DEDUP=off` |
-
-Run `vercel-plugin doctor` to see which strategy is active.
-
----
-
-## Session Cleanup
-
-The `session-end-cleanup.mjs` hook runs at SessionEnd and removes all session-scoped temporary files:
-
-- **Claim directories**: `<tmpdir>/vercel-plugin-<session-id>-seen-skills.d/`
-- **Pending launch dirs**: `<tmpdir>/vercel-plugin-<session-id>-pending-launches/`
-- **Session files**: `<tmpdir>/vercel-plugin-<session-id>-seen-skills.txt`
-
-**Safety**: The hook validates session IDs (alphanumeric, underscore, dash only) and SHA-256 hashes unsafe IDs before using them in file paths. Cleanup failures are silently ignored — the hook always exits 0.
-
----
-
-## User Story: Why Didn't My Skill Inject?
-
-> **Scenario**: You added a new skill `edge-config` but it's not showing up when you edit `edge-config.json`.
-
-**Step 1**: Check if the skill is in the manifest.
-
-```bash
-bun run doctor
-# Look for: "Manifest drift: edge-config missing from manifest"
-```
-
-If missing, rebuild: `bun run build:manifest`
-
-**Step 2**: Check if the pattern matches.
-
-```bash
-bun run scripts/explain.ts --file edge-config.json
-# Should show edge-config in the results
-```
-
-If not matching, check your `pathPatterns` — maybe you need `"edge-config.*"` instead of `"edge-config.json"`.
-
-**Step 3**: Check if it was already injected (dedup).
-
-```bash
-# In your Claude session, check the env:
-echo $VERCEL_PLUGIN_SEEN_SKILLS
-# If edge-config is listed, it was already injected earlier
-```
-
-**Step 4**: Check if it was dropped by budget.
-
-```bash
-bun run scripts/explain.ts --file edge-config.json --budget 18000
-# Look at the "Injection" column — "dropped" means budget exceeded
-```
-
-If dropped, consider:
-- Increasing priority (up to 8)
-- Adding a concise `summary` for summary fallback
-- Reducing the skill body size
-
-**Step 5**: Enable debug logging for full visibility.
-
-```bash
-export VERCEL_PLUGIN_LOG_LEVEL=debug
-# Now open Claude Code — check stderr for decision logs
-```
-
-The debug output shows exactly why each skill was included or excluded, with priority breakdowns and budget calculations.
-
----
-
-## Cross-References
-
-- **[Architecture Overview](./01-architecture-overview.md)** — System diagram, hook lifecycle, glossary
-- **[Injection Pipeline Deep-Dive](./02-injection-pipeline.md)** — Pattern matching, ranking, budget enforcement internals
-- **[Skill Authoring Guide](./03-skill-authoring.md)** — Creating skills, frontmatter schema, YAML parser gotchas
diff --git a/docs/05-reference.md b/docs/05-reference.md
deleted file mode 100644
index aeec95b..0000000
--- a/docs/05-reference.md
+++ /dev/null
@@ -1,391 +0,0 @@
-# Reference
-
-> **Audience**: All — developers, skill authors, maintainers. This section is the canonical lookup for every configurable surface in the Vercel Plugin.
-
----
-
-## Table of Contents
-
-1. [Hook Registry](#hook-registry)
-2. [SyncHookJSONOutput Type Shape](#synchookjsonoutput-type-shape)
-3. [Environment Variables](#environment-variables)
-4. [SKILL.md Frontmatter Reference](#skillmd-frontmatter-reference)
-5. [YAML Parser Edge Cases](#yaml-parser-edge-cases)
-6. [Skill Catalog](#skill-catalog)
-7. [Budget & Limit Constants](#budget--limit-constants)
-8. [Cross-References](#cross-references)
-
----
-
-## Hook Registry
-
-Every hook registered in `hooks/hooks.json`. All hooks run via `node "${CLAUDE_PLUGIN_ROOT}/hooks/<hook-name>.mjs"`.
-
-| Event | Hook File | Matcher | Timeout | Description |
-|-------|-----------|---------|---------|-------------|
-| SessionStart | `session-start-seen-skills.mjs` | `startup\|resume\|clear\|compact` | — | Initializes `VERCEL_PLUGIN_SEEN_SKILLS=""` in the env file for dedup tracking |
-| SessionStart | `session-start-profiler.mjs` | `startup\|resume\|clear\|compact` | — | Scans project config files + package deps → sets `VERCEL_PLUGIN_LIKELY_SKILLS` (+5 priority boost); detects greenfield mode |
-| SessionStart | `inject-claude-md.mjs` | `startup\|resume\|clear\|compact` | — | Injects `vercel.md` ecosystem guide (~52KB) as additionalContext |
-| PreToolUse | `pretooluse-skill-inject.mjs` | `Read\|Edit\|Write\|Bash` | 5s | **Main injection engine.** Pattern match → rank → dedup → budget enforcement (max 3 skills, 18KB) |
-| PreToolUse | `pretooluse-subagent-spawn-observe.mjs` | `Agent` | 5s | **Observer.** Captures pending subagent spawn metadata to JSONL file |
-| UserPromptSubmit | `user-prompt-submit-skill-inject.mjs` | *(all prompts)* | 5s | Prompt signal scoring engine — phrases, allOf, anyOf, noneOf → inject up to 2 skills within 8KB |
-| PostToolUse | `posttooluse-shadcn-font-fix.mjs` | `Bash` | 5s | Fixes shadcn font loading issues by patching font import statements |
-| PostToolUse | `posttooluse-verification-observe.mjs` | `Bash` | 5s | **Observer.** Classifies bash commands into verification boundaries (uiRender, clientRequest, serverHandler, environment) |
-| PostToolUse | `posttooluse-validate.mjs` | `Write\|Edit` | 5s | Runs skill-defined validation rules on written/edited files; reports errors and warnings |
-| SubagentStart | `subagent-start-bootstrap.mjs` | `.+` | 5s | Budget-aware context injection for subagents — scales by agent type (Explore ~1KB, Plan ~3KB, GP ~8KB) |
-| SubagentStop | `subagent-stop-sync.mjs` | `.+` | 5s | **Observer.** Records subagent lifecycle metadata to JSONL ledger |
-| SessionEnd | `session-end-cleanup.mjs` | *(always)* | — | Best-effort cleanup of all session-scoped temp files (dedup claims, profile cache, pending launches, ledger) |
-
-### Shared Library Modules
-
-These are imported by entry-point hooks, not registered in `hooks.json`:
-
-| Module | Source | Purpose |
-|--------|--------|---------|
-| `hook-env.mts` | `hooks/src/hook-env.mts` | Shared runtime helpers: env file parsing, plugin root resolution, dedup claim operations (atomic O_EXCL), audit logging |
-| `patterns.mts` | `hooks/src/patterns.mts` | Glob→regex conversion, path/bash/import matching with match reasons, ranking engine, dedup state merging |
-| `prompt-patterns.mts` | `hooks/src/prompt-patterns.mts` | Prompt text normalization (contraction expansion), signal compilation, scoring, lexical fallback |
-| `prompt-analysis.mts` | `hooks/src/prompt-analysis.mts` | Dry-run prompt analysis reports for debugging prompt matching |
-| `skill-map-frontmatter.mts` | `hooks/src/skill-map-frontmatter.mts` | Inline YAML parser, frontmatter extraction, `buildSkillMap()`, `validateSkillMap()` |
-| `logger.mts` | `hooks/src/logger.mts` | Structured JSON logging to stderr (off/summary/debug/trace levels) |
-| `vercel-config.mts` | `hooks/src/vercel-config.mts` | Reads `vercel.json` keys → maps to skill routing adjustments (±10 priority) |
-| `lexical-index.mts` | `hooks/src/lexical-index.mts` | MiniSearch-based lexical fallback index for fuzzy skill matching |
-| `subagent-state.mts` | `hooks/src/subagent-state.mts` | File-locked JSONL operations for pending launches and agent-scoped dedup claims |
-| `shared-contractions.mts` | `hooks/src/shared-contractions.mts` | Contraction expansion map shared across prompt normalizers |
-| `stemmer.mts` | `hooks/src/stemmer.mts` | Lightweight word stemmer for lexical index tokenization |
-
----
-
-## SyncHookJSONOutput Type Shape
-
-All hooks output JSON conforming to the `SyncHookJSONOutput` type from `@anthropic-ai/claude-agent-sdk`. The shape varies by hook event:
-
-```typescript
-// Imported from @anthropic-ai/claude-agent-sdk
-type SyncHookJSONOutput = {
- hookSpecificOutput?: {
- hookEventName: string; // Must match the hook's event (e.g., "PreToolUse")
- additionalContext?: string; // Markdown injected into Claude's context
- // Event-specific fields (see below)
- };
-  envUpdate?: Record<string, string>; // Updates to CLAUDE_ENV_FILE
-};
-```
-
-### Per-Event Output Shapes
-
-**PreToolUse** (`pretooluse-skill-inject`):
-```json
-{
- "hookSpecificOutput": {
- "hookEventName": "PreToolUse",
-    "additionalContext": "<!-- vercel-plugin:metadata {…} -->\n\n…skill content…\n"
- }
-}
-```
-
-**UserPromptSubmit** (`user-prompt-submit-skill-inject`):
-```json
-{
- "hookSpecificOutput": {
- "hookEventName": "UserPromptSubmit",
-    "additionalContext": "<!-- vercel-plugin:metadata {…} -->\n\n…skill content…\n"
- }
-}
-```
-
-**PostToolUse** (`posttooluse-validate`):
-```json
-{
- "hookSpecificOutput": {
- "hookEventName": "PostToolUse",
-    "additionalContext": "## Validation Results\n\n<error and warning list>"
- }
-}
-```
-
-**SubagentStart** (`subagent-start-bootstrap`):
-```json
-{
- "hookSpecificOutput": {
- "hookEventName": "SubagentStart",
-    "additionalContext": "<budget-scaled bootstrap context for this agent type>"
- }
-}
-```
-
-**Observer hooks** return empty output (no context injection):
-```json
-{}
-```
-
-### Metadata Comments
-
-Injection hooks embed a hidden HTML comment in the additionalContext with structured metadata:
-
-```
-<!-- vercel-plugin:metadata
-{"version":1,"injectedSkills":["nextjs"],"droppedByBudget":[]}
--->
-```
-
-These comments are invisible to the user but machine-parseable for debugging and testing.
-
----
-
-## Environment Variables
-
-All environment variables that influence plugin behavior. Set these in your shell or via `CLAUDE_ENV_FILE`.
-
-| Variable | Default | Type | Description |
-|----------|---------|------|-------------|
-| `VERCEL_PLUGIN_LOG_LEVEL` | `off` | `off` \| `summary` \| `debug` \| `trace` | Controls hook logging verbosity to stderr. `summary` shows per-invocation one-liners. `debug` adds match details. `trace` adds full input/output dumps |
-| `VERCEL_PLUGIN_DEBUG` | — | `1` or unset | Legacy toggle: `1` maps to `debug` log level. Prefer `VERCEL_PLUGIN_LOG_LEVEL` |
-| `VERCEL_PLUGIN_HOOK_DEBUG` | — | `1` or unset | Legacy toggle: `1` maps to `debug` log level. Prefer `VERCEL_PLUGIN_LOG_LEVEL` |
-| `VERCEL_PLUGIN_SEEN_SKILLS` | `""` | comma-delimited string | Already-injected skills for this session. Initialized by `session-start-seen-skills`. Updated by injection hooks. Used for dedup |
-| `VERCEL_PLUGIN_HOOK_DEDUP` | — | `off` or unset | Set to `off` to disable all deduplication. Skills may be injected multiple times. Useful for testing |
-| `VERCEL_PLUGIN_LIKELY_SKILLS` | — | comma-delimited string | Skills identified by the profiler at session start. These receive a **+5 priority boost** during ranking |
-| `VERCEL_PLUGIN_GREENFIELD` | — | `true` or unset | Set by profiler when project has no source files (only dot-directories). Triggers greenfield execution mode |
-| `VERCEL_PLUGIN_INJECTION_BUDGET` | `18000` | integer (bytes) | Maximum total byte size of skill content injected per PreToolUse invocation |
-| `VERCEL_PLUGIN_PROMPT_INJECTION_BUDGET` | `8000` | integer (bytes) | Maximum total byte size of skill content injected per UserPromptSubmit invocation |
-| `VERCEL_PLUGIN_REVIEW_THRESHOLD` | `3` | integer | Number of `.tsx` file edits before `react-best-practices` skill is automatically injected |
-| `VERCEL_PLUGIN_TSX_EDIT_COUNT` | `0` | integer | Current count of `.tsx` edits in this session. Tracked by PreToolUse hook |
-| `VERCEL_PLUGIN_AUDIT_LOG_FILE` | — | file path or `off` | Path to append structured audit log entries. Set to `off` to disable. Unset = no audit logging |
-
-### Environment Variable Decision Tree
-
-```
-Is the plugin not injecting skills you expect?
-├─ Check VERCEL_PLUGIN_LOG_LEVEL=debug for match details
-├─ Check VERCEL_PLUGIN_SEEN_SKILLS — is the skill already claimed?
-│ └─ Set VERCEL_PLUGIN_HOOK_DEDUP=off to test without dedup
-├─ Check VERCEL_PLUGIN_LIKELY_SKILLS — is profiler detecting your stack?
-└─ Check VERCEL_PLUGIN_INJECTION_BUDGET — is budget too small?
-
-Is the plugin injecting too many skills?
-├─ Lower VERCEL_PLUGIN_INJECTION_BUDGET (default 18000)
-├─ Lower VERCEL_PLUGIN_PROMPT_INJECTION_BUDGET (default 8000)
-└─ Increase skill minScore thresholds in SKILL.md frontmatter
-
-Is TSX review triggering too early/late?
-└─ Adjust VERCEL_PLUGIN_REVIEW_THRESHOLD (default 3)
-```
-
----
-
-## SKILL.md Frontmatter Reference
-
-Every skill lives in `skills/<slug>/SKILL.md`. The file has YAML frontmatter (between `---` delimiters) followed by a markdown body.
-
-### Complete Field Reference
-
-```yaml
----
-# Required fields
-name: skill-slug # Unique identifier, must match directory name
-description: "One-line description" # What this skill does (shown in catalogs)
-
-# Optional top-level fields
-summary: "Brief fallback text" # Injected when full body exceeds budget
- # (typically 1-2 sentences)
-
-# Metadata block (all fields optional)
-metadata:
- priority: 6 # Base injection priority (range: 2-9)
- # 2-3: Low priority (browser, marketplace)
- # 4-5: Standard (libraries, utilities)
- # 6-7: High (core Vercel features)
- # 8-9: Critical (AI SDK, functions, workflow)
-
- # Pattern matching — at least one pattern type should be defined
- pathPatterns: # File glob patterns (matched against tool target paths)
- - "vercel.json" # Standard globs: *, **, ?, [abc]
- - "app/**/route.ts" # Compiled to regex at build time
- - "*.config.{js,ts,mjs}" # Brace expansion supported
-
- bashPatterns: # Regex patterns (matched against bash commands)
- - "\\bvercel\\s+deploy\\b" # Full regex syntax
- - "npx\\s+turbo" # Escaped for YAML string context
-
- importPatterns: # Package name patterns (matched against import/require)
- - "ai" # Bare package names
- - "@vercel/blob" # Scoped packages
- - "next/.*" # Regex subpath patterns
-
- # Prompt signal scoring (for UserPromptSubmit hook)
- promptSignals:
- phrases: # Exact substring matches (case-insensitive)
- - "cron job" # Each match: +6 points
- - "scheduled task"
-
- allOf: # Groups where ALL terms must appear
- - ["deploy", "preview"] # Each satisfied group: +4 points
- - ["rollback", "production"]
-
- anyOf: # Optional boosters
- - "schedule" # Each match: +1 point
- - "timer" # Capped at +2 total from anyOf
-
- noneOf: # Hard suppressors
- - "unrelated term" # Any match: score → -Infinity (skill excluded)
-
- minScore: 6 # Threshold to trigger injection (default: 6)
-
- # Post-write validation rules
- validate:
- - pattern: "require\\(" # Regex matched against written file content
- message: "Use ESM imports" # Error/warning message shown to Claude
- severity: "error" # "error" (must fix) or "warn" (suggestion)
- skipIfFileContains: "\"use server\"" # Optional: skip rule if file matches this regex
-
- # Retrieval metadata (for search/discovery tooling)
- retrieval:
- aliases: # Alternative names for this skill
- - "vercel-cron"
- - "scheduled-tasks"
- relatedSkills: # Skills commonly used together
- - "vercel-functions"
- - "env-vars"
----
-
-# Skill Title
-
-Markdown body goes here. This is the content injected into Claude's
-context as additionalContext when the skill matches.
-```
-
-### Field Details
-
-| Field | Type | Required | Description |
-|-------|------|----------|-------------|
-| `name` | string | Yes | Unique skill identifier. Must match the directory name under `skills/` |
-| `description` | string | Yes | One-line description for catalogs and tooling |
-| `summary` | string | No | Short fallback text injected when full body exceeds budget |
-| `metadata.priority` | integer | No | Base injection priority. Range 2–9. Default varies by build. Higher = injected first |
-| `metadata.pathPatterns` | string[] | No | Glob patterns matched against file paths in Read/Edit/Write tools |
-| `metadata.bashPatterns` | string[] | No | Regex patterns matched against Bash tool commands |
-| `metadata.importPatterns` | string[] | No | Package name patterns matched against import/require statements |
-| `metadata.promptSignals` | object | No | Scoring rules for UserPromptSubmit matching (see below) |
-| `metadata.promptSignals.phrases` | string[] | No | Exact substring matches. +6 points each |
-| `metadata.promptSignals.allOf` | string[][] | No | AND-groups. +4 points per satisfied group |
-| `metadata.promptSignals.anyOf` | string[] | No | Optional boosters. +1 each, capped at +2 |
-| `metadata.promptSignals.noneOf` | string[] | No | Hard suppressors. Any match → score = -Infinity |
-| `metadata.promptSignals.minScore` | integer | No | Minimum score to trigger injection. Default: 6 |
-| `metadata.validate` | object[] | No | Post-write validation rules |
-| `metadata.validate[].pattern` | string | Yes* | Regex matched against file content |
-| `metadata.validate[].message` | string | Yes* | Error/warning message |
-| `metadata.validate[].severity` | string | Yes* | `"error"` or `"warn"` |
-| `metadata.validate[].skipIfFileContains` | string | No | Skip rule if file matches this regex |
-| `metadata.retrieval` | object | No | Discovery metadata for search tooling |
-| `metadata.retrieval.aliases` | string[] | No | Alternative names for the skill |
-| `metadata.retrieval.relatedSkills` | string[] | No | Commonly co-used skills |
-
----
-
-## YAML Parser Edge Cases
-
-The plugin uses a custom inline `parseSimpleYaml` parser in `skill-map-frontmatter.mjs`, **not** the `js-yaml` library. This means some YAML constructs behave differently:
-
-| Input | Expected (js-yaml) | Actual (parseSimpleYaml) |
-|-------|---------------------|--------------------------|
-| Bare `null` | JavaScript `null` | String `"null"` |
-| Bare `true` | Boolean `true` | String `"true"` |
-| Bare `false` | Boolean `false` | String `"false"` |
-| Unclosed `[` array | Parse error | Treated as scalar string |
-| Tab indentation | Usually accepted | **Explicit error** |
-
-**Why?** The custom parser prioritizes safety and predictability for frontmatter parsing. Treating bare keywords as strings avoids accidental type coercion in skill metadata.
-
----
-
-## Skill Catalog
-
-All 45 skills, sorted by priority (highest first). Each skill lives in `skills/<slug>/SKILL.md`.
-
-| Skill | Priority | Description | Trigger Types |
-|-------|----------|-------------|---------------|
-| `workflow` | 9 | Vercel Workflow DevKit (WDK) — durable workflows, pause/resume, retries, step-based execution | path, bash, import, prompt |
-| `ai-sdk` | 8 | Vercel AI SDK — chat, text generation, structured output, tool calling, agents, MCP, streaming | path, bash, import, prompt |
-| `bootstrap` | 8 | Project bootstrapping orchestrator — linking, env provisioning, first-run setup | path, bash, import |
-| `chat-sdk` | 8 | Vercel Chat SDK — multi-platform chat bots (Slack, Telegram, Teams, Discord, etc.) | path, bash, import, prompt |
-| `investigation-mode` | 8 | Orchestrated debugging coordinator — logs → workflow → browser → deploy triage | path, bash, prompt |
-| `vercel-functions` | 8 | Serverless/Edge Functions, Fluid Compute, streaming, Cron Jobs, runtime config | path, bash |
-| `ai-gateway` | 7 | AI Gateway — model routing, provider failover, cost tracking, unified API | bash, import |
-| `env-vars` | 7 | Environment variables — `.env` files, `vercel env`, OIDC tokens | path, bash |
-| `vercel-api` | 7 | Vercel MCP and REST API — projects, deployments, domains, logs | path, bash |
-| `vercel-storage` | 7 | Storage — Blob, Edge Config, Neon Postgres, Upstash Redis | path, bash, import |
-| `verification` | 7 | Full-story verification — browser + server + data flow + env | bash, prompt |
-| `auth` | 6 | Authentication — Clerk, Descope, Auth0 with Next.js | path, bash |
-| `ai-generation-persistence` | 6 | AI generation persistence — unique IDs, DB/Blob storage, addressable URLs, cost tracking | path, import, prompt |
-| `cron-jobs` | 6 | Cron Jobs configuration and best practices | path |
-| `deployments-cicd` | 6 | Deployments and CI/CD — deploy, promote, rollback, `--prebuilt`, workflow files | path, bash |
-| `next-forge` | 6 | next-forge monorepo SaaS starter (Turborepo, Clerk, Prisma/Neon, Stripe) | path, bash, import, prompt |
-| `observability` | 6 | Observability — Drains, Web Analytics, Speed Insights, OpenTelemetry | path, bash, prompt |
-| `routing-middleware` | 6 | Routing Middleware — request interception, rewrites, redirects, personalization | path, bash |
-| `runtime-cache` | 6 | Runtime Cache API — ephemeral per-region key-value cache with tag invalidation | path, bash |
-| `shadcn` | 6 | shadcn/ui — CLI, component installation, composition, custom registries, theming | path, bash |
-| `sign-in-with-vercel` | 6 | Sign in with Vercel — OAuth 2.0/OIDC identity provider | path |
-| `vercel-flags` | 6 | Feature flags — dashboard, Flags Explorer, gradual rollouts, A/B testing | path, bash, import |
-| `ai-elements` | 5 | AI Elements — pre-built React components for AI interfaces (chat UIs, tool calls) | path, bash, import, prompt |
-| `nextjs` | 5 | Next.js App Router — routing, Server Components, Server Actions, middleware | path, bash, prompt |
-| `payments` | 5 | Stripe payments — Marketplace setup, checkout sessions, webhooks, subscriptions | path, bash |
-| `turborepo` | 5 | Turborepo — monorepo builds, task caching, remote caching, `--affected` | path, bash |
-| `v0-dev` | 5 | v0 by Vercel — AI code generation, UI from prompts, v0 CLI and SDK | bash, import, prompt |
-| `vercel-firewall` | 5 | Firewall and security — DDoS, WAF, rate limiting, bot filtering, OWASP | path, bash, prompt |
-| `vercel-queues` | 5 | Queues (public beta) — durable event streaming, topics, consumer groups | path, bash, import |
-| `cms` | 4 | Headless CMS — Sanity, Contentful, DatoCMS, Storyblok, Builder.io | path, bash |
-| `email` | 4 | Email — Resend with React Email templates | path, bash |
-| `geist` | 4 | Geist typography — Sans, Mono, Pixel font configuration | path, bash, import |
-| `json-render` | 4 | AI chat response rendering — UIMessage parts, tool calls, streaming states | path |
-| `micro` | 4 | micro — async HTTP microservices framework | bash, import |
-| `ncc` | 4 | @vercel/ncc — compile Node.js modules into a single file | bash, import |
-| `react-best-practices` | 4 | React best-practices reviewer — hooks, a11y, performance, TypeScript patterns | path, import |
-| `satori` | 4 | Satori — HTML/CSS to SVG for dynamic OG images | path, bash, import |
-| `swr` | 4 | SWR — client-side data fetching, caching, revalidation, mutations | path, bash, import, prompt |
-| `turbopack` | 4 | Turbopack — Next.js bundler, HMR, build debugging | path, bash |
-| `vercel-agent` | 4 | Vercel Agent — AI code review, incident investigation, SDK installation | path, bash |
-| `vercel-cli` | 4 | Vercel CLI — deploy, env, link, logs, domains | path, bash, prompt |
-| `vercel-sandbox` | 4 | Sandbox — ephemeral Firecracker microVMs for untrusted code | bash, import, prompt |
-| `agent-browser` | 3 | Browser automation CLI for AI agents | path, bash |
-| `marketplace` | 3 | Marketplace — discovering, installing, and building integrations | path, bash |
-| `agent-browser-verify` | 2 | Automated browser verification for dev servers | bash, prompt |
-
-### Trigger Type Legend
-
-| Trigger | Hook | Matching Method |
-|---------|------|-----------------|
-| **path** | PreToolUse | File glob patterns matched against Read/Edit/Write tool targets |
-| **bash** | PreToolUse | Regex patterns matched against Bash tool commands |
-| **import** | PreToolUse | Package patterns matched against import/require statements in file content |
-| **prompt** | UserPromptSubmit | Phrase/allOf/anyOf/noneOf scoring against user prompt text |
-
----
-
-## Budget & Limit Constants
-
-| Constant | Default | Configurable Via | Description |
-|----------|---------|------------------|-------------|
-| PreToolUse byte budget | 18,000 bytes | `VERCEL_PLUGIN_INJECTION_BUDGET` | Max total skill content per PreToolUse invocation |
-| PreToolUse skill cap | 3 skills | — | Max number of skills injected per PreToolUse |
-| UserPromptSubmit byte budget | 8,000 bytes | `VERCEL_PLUGIN_PROMPT_INJECTION_BUDGET` | Max total skill content per UserPromptSubmit |
-| UserPromptSubmit skill cap | 2 skills | — | Max skills injected per UserPromptSubmit |
-| SubagentStart (Explore) | ~1,000 bytes | — | Skill names + profile summary only |
-| SubagentStart (Plan) | ~3,000 bytes | — | Summaries + deployment constraints |
-| SubagentStart (general-purpose) | ~8,000 bytes | — | Full skill bodies with summary fallback |
-| TSX review threshold | 3 edits | `VERCEL_PLUGIN_REVIEW_THRESHOLD` | `.tsx` edits before injecting `react-best-practices` |
-| Profiler boost | +5 priority | — | Added to skills listed in `VERCEL_PLUGIN_LIKELY_SKILLS` |
-| vercel.json routing | ±10 priority | — | Added/subtracted based on vercel.json key→skill mappings |
-| Prompt phrase score | +6 | — | Per matching phrase in `promptSignals.phrases` |
-| Prompt allOf score | +4 | — | Per satisfied group in `promptSignals.allOf` |
-| Prompt anyOf score | +1 (cap +2) | — | Per matching term in `promptSignals.anyOf` |
-| Default minScore | 6 | Per-skill `promptSignals.minScore` | Threshold for prompt-based injection |
-| Hook timeout | 5 seconds | — | Maximum execution time for all timed hooks |
-
----
-
-## Cross-References
-
-- **Section 1**: [Architecture Overview](./01-architecture-overview.md) — system diagram, core concepts, hook lifecycle, glossary
-- **Section 2**: [Injection Pipeline Deep-Dive](./02-injection-pipeline.md) — pattern matching, ranking, budget enforcement, prompt signal scoring
-- **Section 3**: [Skill Authoring Guide](./03-skill-authoring.md) — creating, testing, and validating new skills
-- **Section 4**: [Operations & Debugging](./04-operations-debugging.md) — environment variables, log levels, `doctor`/`explain` CLI, dedup troubleshooting
diff --git a/docs/06-runtime-internals.md b/docs/06-runtime-internals.md
deleted file mode 100644
index 551fe1b..0000000
--- a/docs/06-runtime-internals.md
+++ /dev/null
@@ -1,1212 +0,0 @@
-# 6. Runtime Internals Reference
-
-> **Purpose**: Document the internal implementation details — hook I/O contracts, matching semantics, dedup state machines, audit log schemas, subagent coordination, and temp file ownership — at the code level.
->
-> **Audience**: Maintainers and contributors who need to understand the internal mechanics of hook execution, pattern compilation, dedup state machines, subagent coordination, and observability infrastructure.
->
-> **Prerequisites**: [01 Architecture Overview](./01-architecture-overview.md), [02 Injection Pipeline](./02-injection-pipeline.md), and [04 Operations & Debugging](./04-operations-debugging.md).
->
-> **Previous page**: [← Reference](./05-reference.md) · This is the final page in the documentation.
-
-This document covers implementation details that go beyond the pipeline overview in [02-injection-pipeline.md](./02-injection-pipeline.md) and the operational guide in [04-operations-debugging.md](./04-operations-debugging.md). Where those documents explain *what* happens, this document explains *how* and *why* at the code level.
-
----
-
-## Table of Contents
-
-1. [Session Lifecycle: Full Hook Invocation Sequence](#session-lifecycle-full-hook-invocation-sequence)
- - [Hook Registration Summary](#hook-registration-summary)
- - [Hook I/O Contracts](#hook-io-contracts)
-2. [Matching Semantics](#matching-semantics)
- - [Glob-to-Regex Conversion](#glob-to-regex-conversion)
- - [Path Match Fallback Chain](#path-match-fallback-chain)
- - [Import Pattern Compilation and Flags](#import-pattern-compilation-and-flags)
- - [Bash Regex Matching](#bash-regex-matching)
- - [Manifest Pre-compilation (Version 2)](#manifest-pre-compilation-version-2)
-3. [Intent Routing and Priority Arithmetic](#intent-routing-and-priority-arithmetic)
- - [Base Priority Range (4–8)](#base-priority-range-48)
- - [Profiler Boost (+5)](#profiler-boost-5)
- - [Vercel.json Key Routing (±10)](#verceljson-key-routing-10)
- - [Special-Case Boosts](#special-case-boosts)
- - [Ranking Function](#ranking-function)
- - [Budget Enforcement](#budget-enforcement)
- - [Prompt Signal Scoring](#prompt-signal-scoring)
-4. [Scoped Dedup System](#scoped-dedup-system)
- - [Three-Tier State Architecture](#three-tier-state-architecture)
- - [Atomic Claim Protocol](#atomic-claim-protocol)
- - [Scope Isolation for Subagents](#scope-isolation-for-subagents)
- - [State Merge Logic](#state-merge-logic)
- - [Strategy Cascade](#strategy-cascade)
- - [Path Safety and Sanitization](#path-safety-and-sanitization)
-5. [Audit Logging and Observability](#audit-logging-and-observability)
- - [Audit Log File (JSONL)](#audit-log-file-jsonl)
- - [Audit Log JSONL Schema](#audit-log-jsonl-schema)
- - [Structured Logger (stderr)](#structured-logger-stderr)
- - [Log Level Resolution](#log-level-resolution)
- - [Log Event Taxonomy](#log-event-taxonomy)
- - [End-to-End Trace: Single Request Flow](#end-to-end-trace-single-request-flow)
-6. [Subagent Lifecycle](#subagent-lifecycle)
- - [Spawn Observation (PreToolUse → Agent)](#spawn-observation-pretooluse--agent)
- - [Bootstrap Injection (SubagentStart)](#bootstrap-injection-subagentstart)
- - [Stop Sync and Ledger (SubagentStop)](#stop-sync-and-ledger-subagentstopmts)
- - [Fresh-Env Behavior](#fresh-env-behavior)
-7. [Verification Observer](#verification-observer)
- - [Boundary Classification](#boundary-classification)
- - [Route Inference](#route-inference)
- - [Event Structure](#event-structure)
-8. [Post-Write Validation](#post-write-validation)
- - [Validation Pipeline](#validation-pipeline)
- - [File-Hash Dedup](#file-hash-dedup)
-9. [Session Cleanup](#session-cleanup)
-10. [Temp File Inventory](#temp-file-inventory)
-
----
-
-## Session Lifecycle: Full Hook Invocation Sequence
-
-The following diagram shows every hook that fires across a complete session, including the subagent sub-lifecycle. Hooks are registered in `hooks/hooks.json` and executed by the Claude Agent SDK runtime.
-
-```mermaid
-sequenceDiagram
- participant User
- participant Claude as Claude Code
- participant SS as SessionStart Hooks
- participant PTU as PreToolUse Hooks
- participant UPS as UserPromptSubmit Hook
- participant PostTU as PostToolUse Hooks
- participant Sub as Subagent Hooks
- participant SE as SessionEnd Hook
-
- Note over SS: Session begins (startup/resume/clear/compact)
- Claude->>SS: session-start-seen-skills.mjs
- Note right of SS: Initialize VERCEL_PLUGIN_SEEN_SKILLS=""
- Claude->>SS: session-start-profiler.mjs
- Note right of SS: Scan project → set VERCEL_PLUGIN_LIKELY_SKILLS
- Claude->>SS: inject-claude-md.mjs
- Note right of SS: Output vercel.md ecosystem graph (~52KB)
-
- loop Every user message
- User->>Claude: Prompt text
- Claude->>UPS: user-prompt-submit-skill-inject.mjs
- Note right of UPS: Score prompt signals → inject ≤2 skills (8KB)
- end
-
- loop Every Read/Edit/Write/Bash tool call
- Claude->>PTU: pretooluse-skill-inject.mjs
- Note right of PTU: Match patterns → rank → dedup → inject ≤3 skills (18KB)
- end
-
- loop Every Agent tool call
- Claude->>PTU: pretooluse-subagent-spawn-observe.mjs
- Note right of PTU: Record pending launch metadata (JSONL)
- end
-
- loop Every Bash completion
- Claude->>PostTU: posttooluse-shadcn-font-fix.mjs
- Note right of PostTU: Fix shadcn font loading (if applicable)
- Claude->>PostTU: posttooluse-verification-observe.mjs
- Note right of PostTU: Classify boundary → emit structured event
- end
-
- loop Every Write/Edit completion
- Claude->>PostTU: posttooluse-validate.mjs
- Note right of PostTU: Run skill validation rules → return fix instructions
- end
-
- rect rgb(240, 248, 255)
- Note over Sub: Subagent sub-lifecycle
- Claude->>Sub: subagent-start-bootstrap.mjs
- Note right of Sub: Inject context (profiler + skills) scoped by agent_id
- Note over Sub: Subagent runs (own PreToolUse/PostToolUse cycle)
- Claude->>Sub: subagent-stop-sync.mjs
- Note right of Sub: Append to aggregate ledger (JSONL)
- end
-
- Note over SE: Session ends
- Claude->>SE: session-end-cleanup.mjs
- Note right of SE: Delete all session temp files + claim dirs
-```
-
-### Hook Registration Summary
-
-| Event | Hook File | Matcher | Timeout |
-|-------|-----------|---------|---------|
-| SessionStart | `session-start-seen-skills.mjs` | `startup\|resume\|clear\|compact` | — |
-| SessionStart | `session-start-profiler.mjs` | `startup\|resume\|clear\|compact` | — |
-| SessionStart | `inject-claude-md.mjs` | `startup\|resume\|clear\|compact` | — |
-| PreToolUse | `pretooluse-skill-inject.mjs` | `Read\|Edit\|Write\|Bash` | 5 s |
-| PreToolUse | `pretooluse-subagent-spawn-observe.mjs` | `Agent` | 5 s |
-| UserPromptSubmit | `user-prompt-submit-skill-inject.mjs` | *(empty — all prompts)* | 5 s |
-| PostToolUse | `posttooluse-shadcn-font-fix.mjs` | `Bash` | 5 s |
-| PostToolUse | `posttooluse-verification-observe.mjs` | `Bash` | 5 s |
-| PostToolUse | `posttooluse-validate.mjs` | `Write\|Edit` | 5 s |
-| SubagentStart | `subagent-start-bootstrap.mjs` | `.+` | 5 s |
-| SubagentStop | `subagent-stop-sync.mjs` | `.+` | 5 s |
-| SessionEnd | `session-end-cleanup.mjs` | — | — |
-
-All hooks output JSON conforming to `SyncHookJSONOutput` from `@anthropic-ai/claude-agent-sdk`. Observer hooks (spawn-observe, verification-observe, stop-sync) output empty `{}`.
-
-### Hook I/O Contracts
-
-Every hook reads JSON from **stdin** and writes JSON to **stdout**. The Claude Agent SDK provides the stdin envelope; hooks must return a `SyncHookJSONOutput`-conforming object. This section documents the exact shapes for each hook.
-
-#### Common Stdin Fields
-
-Most hooks receive these fields from the SDK (availability varies by event):
-
-| Field | Type | Availability | Description |
-|-------|------|-------------|-------------|
-| `tool_name` | `string` | PreToolUse, PostToolUse | The tool being invoked (`Read`, `Edit`, `Write`, `Bash`, `Agent`) |
-| `tool_input` | `object` | PreToolUse, PostToolUse | Tool-specific arguments (e.g., `file_path`, `command`) |
-| `session_id` | `string?` | All events | Session identifier; fallback: `SESSION_ID` env var |
-| `cwd` | `string?` | PreToolUse, PostToolUse, UserPromptSubmit | Working directory; fallback: `working_directory` field |
-| `agent_id` | `string?` | PreToolUse, SubagentStart, SubagentStop | Agent identifier for subagent-scoped dedup |
-| `prompt` | `string` | UserPromptSubmit | User's prompt text |
-| `agent_type` | `string?` | SubagentStart, SubagentStop | Agent type label (e.g., `general-purpose`, `Explore`) |
-
-#### SessionStart Hooks
-
-**`session-start-seen-skills.mjs`** — No stdin parsing. Appends `export VERCEL_PLUGIN_SEEN_SKILLS=""` to `CLAUDE_ENV_FILE`. No stdout.
-
-**`session-start-profiler.mjs`** — Reads `session_id` from stdin (optional). Scans project files and writes results to `CLAUDE_ENV_FILE`:
-
-```bash
-# Environment variables appended:
-export VERCEL_PLUGIN_AGENT_BROWSER_AVAILABLE="0" # or "1"
-export VERCEL_PLUGIN_GREENFIELD="true" # if empty project
-export VERCEL_PLUGIN_LIKELY_SKILLS="nextjs,ai-sdk,vercel-storage"
-export VERCEL_PLUGIN_BOOTSTRAP_HINTS="env-template,readme"
-export VERCEL_PLUGIN_RESOURCE_HINTS="postgres,kv"
-export VERCEL_PLUGIN_SETUP_MODE="1" # if ≥3 bootstrap hints
-```
-
-Also writes a profile cache to `<tmpdir>/vercel-plugin-<sid>-profile.json`:
-
-```json
-{
- "projectRoot": "/path/to/project",
- "likelySkills": ["nextjs", "ai-sdk"],
- "greenfield": false,
- "bootstrapHints": ["env-template"],
- "resourceHints": ["postgres"],
- "setupMode": false,
- "agentBrowserAvailable": true,
- "timestamp": "2026-03-10T12:00:00.000Z"
-}
-```
-
-**`inject-claude-md.mjs`** — No stdin. Writes plain markdown text to stdout (the `vercel.md` ecosystem graph, ~52 KB). If `VERCEL_PLUGIN_GREENFIELD=true`, appends a greenfield guidance section.
-
-#### PreToolUse: `pretooluse-skill-inject.mjs`
-
-**Stdin:**
-
-```json
-{
- "tool_name": "Read",
- "tool_input": {
- "file_path": "src/app/page.tsx",
- "content": "..."
- },
- "session_id": "abc-123",
- "cwd": "/path/to/project",
- "agent_id": "def-456"
-}
-```
-
-For Bash tools, `tool_input.command` replaces `file_path`. The `agent_id` field is present only for subagents.
-
-**Stdout (skill injected):**
-
-```json
-{
- "hookSpecificOutput": {
- "hookEventName": "PreToolUse",
- "additionalContext": "\n...skill body...\n\n"
- }
-}
-```
-
-**Stdout (no match):**
-
-```json
-{}
-```
-
-**Embedded metadata comment** (inside `additionalContext`):
-
-```json
-{
- "version": 1,
- "toolName": "Read",
- "toolTarget": "src/app/page.tsx",
- "matchedSkills": ["nextjs", "react-best-practices"],
- "injectedSkills": ["nextjs"],
- "summaryOnly": [],
- "droppedByCap": [],
- "droppedByBudget": ["react-best-practices"],
- "reasons": {
- "nextjs": {
- "trigger": "pattern-match",
- "reasonCode": "pathPattern matched: app/**/page.tsx"
- }
- }
-}
-```
-
-For Bash tool calls, `toolTarget` is redacted for security.
-
-#### PreToolUse: `pretooluse-subagent-spawn-observe.mjs`
-
-**Stdin:**
-
-```json
-{
- "tool_name": "Agent",
- "tool_input": {
- "name": "researcher",
- "description": "Research API patterns",
- "prompt": "Find all API routes...",
- "subagent_type": "Explore",
- "resume": null
- },
- "session_id": "abc-123"
-}
-```
-
-**Stdout:** Always `{}` (observer only). Records a pending launch to `<tmpdir>/vercel-plugin-<sid>-pending-launches.jsonl`.
-
-#### UserPromptSubmit: `user-prompt-submit-skill-inject.mjs`
-
-**Stdin:**
-
-```json
-{
- "prompt": "my deploy keeps failing with a timeout error",
- "session_id": "abc-123",
- "cwd": "/path/to/project"
-}
-```
-
-Prompts shorter than 10 characters are rejected immediately.
-
-**Stdout (skill injected):**
-
-```json
-{
- "hookSpecificOutput": {
- "hookEventName": "UserPromptSubmit",
- "additionalContext": "\n...body...\n\n"
- }
-}
-```
-
-**Stdout (no match):** `{}`
-
-#### PostToolUse: `posttooluse-verification-observe.mjs`
-
-**Stdin:**
-
-```json
-{
- "tool_name": "Bash",
- "tool_input": { "command": "curl http://localhost:3000/api/users" },
- "session_id": "abc-123",
- "cwd": "/path/to/project"
-}
-```
-
-**Stdout:** Always `{}` (observer only). Emits a `verification.boundary_observed` log event to stderr.
-
-#### PostToolUse: `posttooluse-validate.mjs`
-
-**Stdin:**
-
-```json
-{
- "tool_name": "Write",
- "tool_input": { "file_path": "src/app/api/route.ts" },
- "session_id": "abc-123",
- "cwd": "/path/to/project"
-}
-```
-
-**Stdout (violations found):**
-
-```json
-{
- "hookSpecificOutput": {
- "hookEventName": "PostToolUse",
- "additionalContext": "⚠️ Validation issues...\n"
- }
-}
-```
-
-**Stdout (no violations):** `{}`
-
-#### SubagentStart: `subagent-start-bootstrap.mjs`
-
-**Stdin:**
-
-```json
-{
- "session_id": "abc-123",
- "cwd": "/path/to/project",
- "agent_id": "def-456",
- "agent_type": "general-purpose"
-}
-```
-
-**Stdout:**
-
-```json
-{
- "hookSpecificOutput": {
- "hookEventName": "SubagentStart",
- "additionalContext": "\n...profile + skill content..."
- }
-}
-```
-
-Content is budget-scaled by agent type: Explore (~1 KB), Plan (~3 KB), general-purpose (~8 KB).
-
-#### SubagentStop: `subagent-stop-sync.mjs`
-
-**Stdin:**
-
-```json
-{
- "session_id": "abc-123",
- "agent_id": "def-456",
- "agent_type": "general-purpose",
- "agent_transcript_path": "/path/to/transcript"
-}
-```
-
-**Stdout:** No output. Appends a ledger entry to `<tmpdir>/vercel-plugin-<sid>-subagent-ledger.jsonl`.
-
-#### SessionEnd: `session-end-cleanup.mjs`
-
-**Stdin:** `{ "session_id": "abc-123" }` (optional)
-
-**Stdout:** No output. Deletes all `vercel-plugin--*` files/dirs from `tmpdir()`.
-
----
-
-## Matching Semantics
-
-The injection engine uses three distinct pattern types. Each compiles differently and has different matching behavior.
-
-### Glob-to-Regex Conversion
-
-**Source**: `hooks/src/patterns.mts` → `globPatternToRegexSource()`
-
-The plugin uses a custom glob-to-regex converter (no external dependencies). Conversion rules:
-
-| Glob Token | Regex Output | Semantics |
-|------------|-------------|-----------|
-| `*` | `[^/]*` | Any characters except path separators |
-| `**/` | `(?:[^/]+/)*` | Zero or more path segments (with boundary) |
-| `**` (at end) | `.*` | Everything including slashes |
-| `?` | `[^/]` | Single non-slash character |
-| `{a,b,c}` | `(?:a\|b\|c)` | Alternation (requires commas) |
-| `.` `(` `)` `+` etc. | `\.` `\(` `\)` `\+` | Escaped metacharacters |
-
-The final regex is anchored: `^…pattern…$` (full-path match).
-
-**Edge cases**:
-- Empty patterns are rejected with an error
-- Brace groups without commas are treated as literal `{}`
-- Nested braces are handled recursively
-
-### Path Match Fallback Chain
-
-**Source**: `hooks/src/patterns.mts` → `matchPathWithReason()`
-
-Path matching uses a three-step fallback strategy, stopping at the first hit:
-
-```
-1. Full-path match e.g., "src/components/Button.tsx" vs "src/**/*.tsx"
- ↓ (no match)
-2. Basename-only match e.g., "Button.tsx" vs "*.tsx"
- ↓ (no match)
-3. Suffix segment scan e.g., try "Button.tsx", then "components/Button.tsx", etc.
-```
-
-All paths are normalized (backslashes → forward slashes) before matching.
-
-### Import Pattern Compilation and Flags
-
-**Source**: `hooks/src/patterns.mts` → `importPatternToRegex()`
-
-Import patterns start as package names (e.g., `@vercel/postgres`) and compile to a regex that matches ESM, CommonJS, and dynamic imports:
-
-```
-(?:from\s+|require\s*\(\s*|import\s*\(\s*)['"]<pkg>(?:/[^'"]*)?['"]
-```
-
-This matches:
-- ESM: `from 'package'` or `from "package"`
-- CommonJS: `require('package')` or `require( 'package' )`
-- Dynamic: `import('package')` or `import( 'package' )`
-- Subpaths: `'package/subpath'` (via optional `(?:/[^'"]*)?`)
-
-**Flags**: Hardcoded `"m"` (multiline) — enables `^`/`$` to match line boundaries. Case-sensitive by default.
-
-Wildcards in patterns (`*`) expand to `[^'"]*` (any non-quote characters).
-
-### Bash Regex Matching
-
-**Source**: `hooks/src/patterns.mts` → `matchBashWithReason()`
-
-Bash patterns are **raw JavaScript RegExp strings** — no transformation or escaping. They are passed directly to `new RegExp(p)`.
-
-- Single-pass: tests each pattern against the full command string
-- Returns on first match — no fallback strategies (unlike path matching)
-- Invalid regex syntax throws at compile time
-
-### Manifest Pre-compilation (Version 2)
-
-**Source**: `scripts/build-manifest.ts` → `compileRegexSources()`
-
-The manifest pre-compiles all patterns at build time to avoid runtime regex compilation in the hot path. The version 2 format uses **paired arrays** — index `i` in the pattern array corresponds to index `i` in the regex-source array:
-
-```json
-{
- "version": 2,
- "skills": {
- "skill-slug": {
- "priority": 6,
- "pathPatterns": ["**/*.tsx"],
- "pathRegexSources": ["^(?:[^/]+\\/)*[^/]*\\.tsx$"],
- "bashPatterns": ["npm run dev"],
- "bashRegexSources": ["npm run dev"],
- "importPatterns": ["next"],
- "importRegexSources": [{ "source": "...", "flags": "m" }]
- }
- }
-}
-```
-
-If a pattern fails to compile, it is dropped from **both** arrays together, preventing index desynchronization.
-
-At runtime, `compileSkillPatterns(skillMap)` creates `CompiledPattern { pattern: string, regex: RegExp }` objects from the manifest. Compilation errors are reported via callbacks (`onPathGlobError`, `onBashRegexError`, `onImportPatternError`) but never crash the hook.
-
----
-
-## Intent Routing and Priority Arithmetic
-
-Every matched skill receives an **effective priority** computed from its base priority plus contextual adjustments. The following table shows all modifiers:
-
-| Mechanism | Adjustment | Scope | Condition |
-|-----------|-----------|-------|-----------|
-| Base priority | 4–8 | All skills | Static in `SKILL.md` frontmatter |
-| Profiler boost | **+5** | `VERCEL_PLUGIN_LIKELY_SKILLS` | Skill detected by project profiler |
-| Vercel.json (match) | **+10** | 4 routing skills | Skill's key found in `vercel.json` |
-| Vercel.json (no match) | **−10** | 4 routing skills | Skill's key absent from `vercel.json` |
-| Setup-mode bootstrap | **+50** | `bootstrap` skill | Greenfield or ≥3 bootstrap hints |
-| TSX review trigger | **+40** | `react-best-practices` | After N `.tsx` edits (default 3) |
-| Dev-server verify | **+45** | `agent-browser-verify` | Dev server command detected |
-
-### Base Priority Range (4–8)
-
-Every skill declares a `metadata.priority` between 4 and 8 in its `SKILL.md` frontmatter. Higher values indicate the skill is more broadly useful or more critical to inject early.
-
-### Profiler Boost (+5)
-
-**Source**: `hooks/src/session-start-profiler.mts`
-
-At SessionStart, the profiler scans the project for:
-
-1. **File markers** (14 patterns): `next.config.js/mjs/ts/mts`, `turbo.json`, `vercel.json`, `.mcp.json`, `middleware.ts/js`, `components.json`, `.env.local`, `pnpm-workspace.yaml`
-2. **Package dependencies** (~20 mapped): `next`, `ai`, `@ai-sdk/*`, `@vercel/*` (blob, kv, postgres, edge-config, analytics, speed-insights, flags, workflow, queue, sandbox, sdk), `turbo`, `@t3-oss/env-nextjs`
-3. **Vercel.json keys**: `crons`, `rewrites`, `redirects`, `headers`, `functions`
-4. **Bootstrap signals** (11 patterns): env templates, README, drizzle/prisma configs, setup scripts, auth/resource dependencies
-
-Results are written to:
-- `VERCEL_PLUGIN_LIKELY_SKILLS` — comma-delimited skill list
-- `VERCEL_PLUGIN_GREENFIELD=true` — if project is empty/new
-- `VERCEL_PLUGIN_BOOTSTRAP_HINTS` — setup signal names
-- `VERCEL_PLUGIN_RESOURCE_HINTS` — resource dependency names
-- `VERCEL_PLUGIN_SETUP_MODE=1` — if ≥3 bootstrap hints detected
-
-The `observability` skill is always added for non-greenfield projects.
-
-### Vercel.json Key Routing (±10)
-
-**Source**: `hooks/src/vercel-config.mts`
-
-Only applies to four skills: `cron-jobs`, `deployments-cicd`, `routing-middleware`, `vercel-functions`.
-
-The key-to-skill mapping:
-
-| vercel.json Key | Skill |
-|----------------|-------|
-| `redirects`, `rewrites`, `headers`, `cleanUrls`, `trailingSlash` | `routing-middleware` |
-| `crons` | `cron-jobs` |
-| `functions`, `regions` | `vercel-functions` |
-| `builds`, `buildCommand`, `installCommand`, `outputDirectory`, `framework`, `devCommand`, `ignoreCommand` | `deployments-cicd` |
-
-If the skill's associated key **exists** in `vercel.json` → +10. If the skill is one of the four routing skills but its key is **absent** → −10.
-
-### Special-Case Boosts
-
-- **Setup-mode bootstrap (+50)**: When `VERCEL_PLUGIN_SETUP_MODE=1`, the `bootstrap` skill receives `max(basePriority + 50, maxOtherPriority + 1)`, ensuring it always ranks first.
-- **TSX review trigger (+40)**: After `VERCEL_PLUGIN_REVIEW_THRESHOLD` (default 3) `.tsx` edits, injects `react-best-practices` with a +40 boost. Counter resets after injection.
-- **Dev-server verify (+45)**: On `npm run dev`, `next dev`, `vercel dev`, etc., injects `agent-browser-verify` + `verification` companion. Capped at 2 injections per session (loop guard).
-- **Vercel env help**: One-time injection when `vercel env add/update/pull` commands are detected.
-
-### Ranking Function
-
-**Source**: `hooks/src/patterns.mts` → `rankEntries()`
-
-After all priority adjustments:
-
-```
-Sort by: effectivePriority DESC → base priority DESC → skill name ASC (tiebreaker)
-```
-
-### Budget Enforcement
-
-Two independent budgets:
-
-| Hook | Max Skills | Max Bytes | Env Override |
-|------|-----------|-----------|-------------|
-| PreToolUse | 5 | 18,000 | `VERCEL_PLUGIN_INJECTION_BUDGET` |
-| UserPromptSubmit | 2 | 8,000 | `VERCEL_PLUGIN_PROMPT_INJECTION_BUDGET` |
-
-Enforcement logic in `injectSkills()`:
-1. The first matched skill is **always** injected in full (never dropped)
-2. Subsequent skills are checked against remaining budget
-3. If full body exceeds budget but a `summary` exists → inject summary instead (wrapped in summary markers)
-4. If neither fits → skill is dropped entirely
-
-Return metadata categorizes every matched skill into: `loaded`, `summaryOnly`, `droppedByCap`, or `droppedByBudget`.
-
-### Prompt Signal Scoring
-
-**Source**: `hooks/src/prompt-patterns.mts` → `matchPromptWithReason()`
-
-Prompt text is normalized (lowercased, contractions expanded, whitespace collapsed) then scored:
-
-| Signal Type | Points | Semantics |
-|-------------|--------|-----------|
-| `phrases` | **+6** each | Exact substring match (case-insensitive) |
-| `allOf` | **+4** per group | All terms in group must match |
-| `anyOf` | **+1** each, **cap +2** | Any term matches, capped total |
-| `noneOf` | **−∞** | Hard suppress (score → `-Infinity`) |
-
-Default `minScore`: 6. A skill is matched if `score >= minScore`.
-
-**Lexical fallback** (`scorePromptWithLexical()`): If exact scoring fails to reach threshold, a lexical skill index is searched with an adaptive boost multiplier:
-- `1.5×` if exact score = 0 (no signal overlap at all)
-- `1.35×` if exact score > 0 but < `minScore/2` (weak signal)
-- `1.1×` if exact score ≥ `minScore/2` but < `minScore` (near-threshold)
-
-**Troubleshooting intent routing** (`classifyTroubleshootingIntent()`): Three detection families:
-- **Browser-only** (blank page, white screen, console errors) → `agent-browser-verify` + `investigation-mode`
-- **Flow-verification** ("X but Y" patterns — loads but, submits but) → `verification`
-- **Stuck-investigation** (hung, frozen, timeout, spinning) → `investigation-mode`
-
-Test framework mentions (`jest|vitest|playwright test|cypress test|mocha|karma|testing library`) suppress all verification-family skills.
-
----
-
-## Scoped Dedup System
-
-The dedup system prevents the same skill from being injected twice in a session. It must handle concurrent hook invocations, subagent isolation, and env-var race conditions.
-
-### Three-Tier State Architecture
-
-```mermaid
-graph TD
- A["Env Var<br/>VERCEL_PLUGIN_SEEN_SKILLS<br/>(comma-delimited)"] --> D[mergeSeenSkillStates]
- B["Session File<br/><tmpdir>/vercel-plugin-<sid>-seen-skills.txt<br/>(comma-delimited snapshot)"] --> D
- C["Claim Dir<br/><tmpdir>/vercel-plugin-<sid>-seen-skills.d/<br/>(one empty file per skill)"] --> D
- D --> E["Union of all three<br/>(canonical seen-skills set)"]
-```
-
-| Tier | Storage | Persistence | Concurrency |
-|------|---------|-------------|-------------|
-| Env var | `VERCEL_PLUGIN_SEEN_SKILLS` | Process lifetime | Race-prone across hooks |
-| Session file | `…-seen-skills.txt` | Disk (session-scoped) | Overwrite-last-wins |
-| Claim dir | `…-seen-skills.d/` | Disk (session-scoped) | **Atomic** via O_EXCL |
-
-### Atomic Claim Protocol
-
-**Source**: `hooks/src/hook-env.mts` → `tryClaimSessionKey()`
-
-```typescript
-openSync(path, "wx") // O_CREAT | O_EXCL — fails if file exists
-```
-
-- Creates an empty file named `encodeURIComponent(skillSlug)` inside the claim dir
-- Returns `true` on success (skill claimed), `false` on EEXIST (already claimed)
-- Prevents concurrent hook invocations from double-injecting the same skill
-- `syncSessionFileFromClaims()` reads the claim dir and writes a comma-delimited snapshot to the session file
-
-### Scope Isolation for Subagents
-
-Each agent gets its own dedup scope identified by `agent_id` (or `"main"` for the lead agent):
-
-```
-Lead agent: vercel-plugin-<sid>-seen-skills.d/
-Subagent A: vercel-plugin-<sid>-<agentIdA>-seen-skills.d/
-Subagent B: vercel-plugin-<sid>-<agentIdB>-seen-skills.d/
-```
-
-This means:
-- Subagents can re-inject skills that the lead agent already claimed
-- Sibling subagents can re-inject each other's skills
-- Only claims within the same scope prevent re-injection
-
-### State Merge Logic
-
-**Source**: `hooks/src/patterns.mts` → `mergeScopedSeenSkillStates()`
-
-```
-if (scopeId === "main"):
- merge(envValue, fileValue, claimValue) // All three tiers
-else:
- merge(fileValue, claimValue) // Exclude parent env var
-```
-
-Subagents exclude the env var because it contains the **parent's** accumulated state — using it would suppress skills the subagent hasn't seen yet.
-
-### Strategy Cascade
-
-The dedup system selects a strategy based on available infrastructure:
-
-```
-1. "file" — Atomic claim dir (preferred, survives across invocations)
- ↓ (tmpdir unavailable or errors)
-2. "env-var" — VERCEL_PLUGIN_SEEN_SKILLS only (fallback, race-prone)
- ↓ (env var unavailable)
-3. "memory-only" — In-process Set (single invocation only)
- ↓ (explicit opt-out)
-4. "disabled" — VERCEL_PLUGIN_HOOK_DEDUP=off (no dedup at all)
-```
-
-Strategy selection is logged at `debug` level for diagnostics.
-
-### Path Safety and Sanitization
-
-All temp file paths are validated:
-1. Session IDs matching `^[a-zA-Z0-9_-]+$` are used directly
-2. Unsafe IDs are SHA256-hashed to prevent path traversal
-3. Skill keys in claim filenames use `encodeURIComponent()`
-4. Resolved paths are verified to stay within `tmpdir()` — an error is thrown on escape attempts
-
-All dedup file I/O functions **swallow errors silently** (try/catch, stderr logging only at debug level). This prevents hook timeouts from transient filesystem issues.
-
----
-
-## Audit Logging and Observability
-
-Two independent logging systems capture runtime behavior: a persistent **audit log file** (JSONL on disk) and ephemeral **structured logging** (JSON to stderr). This section documents both systems, provides the formal JSONL schema, and catalogs every log event.
-
-### Audit Log File (JSONL)
-
-**Source**: `hooks/src/hook-env.mts` → `appendAuditLog()`
-
-A JSONL file (one JSON object per line) recording every skill injection decision. This is the only persistent record of plugin behavior across sessions.
-
-**Path resolution priority**:
-1. `VERCEL_PLUGIN_AUDIT_LOG_FILE` env var (relative to project root)
-2. If set to `"off"` → disabled
-3. Default: `~/.claude/projects/<project-slug>/vercel-plugin/skill-injections.jsonl`
-
-The parent directory is created automatically (`mkdirSync` with `recursive: true`). Write errors are logged to stderr but never propagate — audit logging is best-effort.
-
-#### Audit Log JSONL Schema
-
-Every line is a self-contained JSON object. The `timestamp` field is always injected by `appendAuditLog()`.
-
-**PreToolUse record** (`event: "skill-injection"`):
-
-```json
-{
- "timestamp": "2026-03-10T12:00:00.000Z",
- "event": "skill-injection",
- "toolName": "Read",
- "toolTarget": "src/app/page.tsx",
- "matchedSkills": ["nextjs", "react-best-practices"],
- "injectedSkills": ["nextjs"],
- "summaryOnly": [],
- "droppedByCap": [],
- "droppedByBudget": ["react-best-practices"]
-}
-```
-
-**UserPromptSubmit record** (`event: "prompt-skill-injection"`):
-
-```json
-{
- "timestamp": "2026-03-10T12:01:00.000Z",
- "event": "prompt-skill-injection",
- "hookEvent": "UserPromptSubmit",
- "matchedSkills": ["deployments-cicd", "vercel-functions"],
- "injectedSkills": ["deployments-cicd"],
- "summaryOnly": [],
- "droppedByCap": ["vercel-functions"],
- "droppedByBudget": []
-}
-```
-
-**Field reference:**
-
-| Field | Type | Description |
-|-------|------|-------------|
-| `timestamp` | `string` (ISO 8601) | When the record was written |
-| `event` | `string` | Record type: `"skill-injection"` (PreToolUse) or `"prompt-skill-injection"` (UserPromptSubmit) |
-| `toolName` | `string` | Tool that triggered injection (`Read`, `Edit`, `Write`, `Bash`) — PreToolUse only |
-| `toolTarget` | `string` | File path or redacted command — PreToolUse only. Bash commands are always `"[redacted]"` |
-| `hookEvent` | `string` | `"UserPromptSubmit"` — UserPromptSubmit only |
-| `matchedSkills` | `string[]` | All skills whose patterns/signals matched |
-| `injectedSkills` | `string[]` | Skills whose full body was injected |
-| `summaryOnly` | `string[]` | Skills injected as summary (over budget but summary fits) |
-| `droppedByCap` | `string[]` | Skills dropped by the per-invocation cap (5 PreToolUse, 2 UserPromptSubmit) |
-| `droppedByBudget` | `string[]` | Skills dropped because neither body nor summary fits within remaining budget |
-
-**Analyzing audit logs:**
-
-```bash
-# Count injections per skill
-cat ~/.claude/projects/*/vercel-plugin/skill-injections.jsonl | \
- jq -r '.injectedSkills[]' | sort | uniq -c | sort -rn
-
-# Find budget-dropped skills
-cat ~/.claude/projects/*/vercel-plugin/skill-injections.jsonl | \
- jq 'select(.droppedByBudget | length > 0)'
-
-# Injection timeline for a specific tool target
-cat ~/.claude/projects/*/vercel-plugin/skill-injections.jsonl | \
- jq 'select(.toolTarget == "src/app/page.tsx")'
-```
-
-### Structured Logger (stderr)
-
-**Source**: `hooks/src/logger.mts`
-
-All hooks emit structured JSON to stderr at configurable verbosity levels. Logs are ephemeral — they only exist while the process runs.
-
-**Log line format**:
-```json
-{
- "invocationId": "a3f1c02e",
- "event": "decision:match",
- "timestamp": "2026-03-10T12:00:00.000Z",
- "skill": "nextjs",
- "matchType": "path",
- "pattern": "**/*.tsx"
-}
-```
-
-The `invocationId` (8-char hex) is shared across all hooks in the same process (stored in `globalThis`), enabling correlation of events within a single hook invocation.
-
-**Logger methods**:
-
-| Method | Min Level | Use Case |
-|--------|----------|----------|
-| `summary(event, data)` | `summary` | High-level injection decisions |
-| `complete(reason, counts, timing)` | `summary` | End-of-hook summary with counts |
-| `debug(event, data)` | `debug` | Match reasons, dedup decisions, priority adjustments |
-| `trace(event, data)` | `trace` | Per-pattern evaluation details |
-| `issue(code, message, hint, ctx)` | `summary` | Errors and warnings with fix hints |
-
-**CompleteCounts fields**: `matchedCount`, `injectedCount`, `dedupedCount`, `cappedCount`, `tsxReviewTriggered`, `devServerVerifyTriggered`, `matchedSkills`, `injectedSkills`, `droppedByCap`, `droppedByBudget`, `boostsApplied`.
-
-### Log Level Resolution
-
-**Source**: `hooks/src/logger.mts` → `resolveLogLevel()`
-
-```
-1. VERCEL_PLUGIN_LOG_LEVEL env var (explicit: "off" | "summary" | "debug" | "trace")
-2. VERCEL_PLUGIN_DEBUG=1 → "debug" (legacy)
-3. VERCEL_PLUGIN_HOOK_DEBUG=1 → "debug" (legacy)
-4. Default: "off"
-```
-
-Hierarchy: `off` < `summary` < `debug` < `trace`. Each level includes all events from lower levels.
-
-### Log Event Taxonomy
-
-Every structured log event emitted by the plugin, organized by source hook and level. All events include the standard `invocationId`, `event`, and `timestamp` fields.
-
-#### PreToolUse (`pretooluse-skill-inject`)
-
-| Event | Level | Payload Fields | Description |
-|-------|-------|---------------|-------------|
-| `complete` | summary | `reason`, `matchedCount`, `injectedCount`, `dedupedCount`, `cappedCount`, `matchedSkills`, `injectedSkills`, `droppedByCap`, `droppedByBudget`, `boostsApplied`, `elapsed_ms`, `timing_ms` | End-of-hook summary |
-| `issue` | summary | `code`, `message`, `hint`, `context` | Error or warning (see issue codes below) |
-| `input-parsed` | debug | `toolName`, `sessionId`, `cwd`, `scopeId` | Stdin successfully parsed |
-| `tool-target` | debug | `toolName`, `target` | Tool target identified (Bash commands redacted) |
-| `manifest-loaded` | debug | `path`, `generatedAt`, `version` | Manifest loaded from disk |
-| `skillmap-loaded` | debug | `skillCount` | Skill map built (with or without manifest) |
-| `likely-skills` | debug | `skills` | Profiler-detected skills |
-| `setup-mode` | debug | `active`, `bootstrapSkill` | Setup mode status |
-| `dedup-strategy` | debug | `strategy`, `sessionId`, `seenEnv` | Dedup strategy selected |
-| `decision:match` | debug | `hook`, `skill`, `score`, `reason` | A skill matched the trigger |
-| `decision:dedup` | debug | `hook`, `skill`, `reason` | A skill was skipped (already seen) |
-| `decision:boost` | debug | `hook`, `skill`, `score`, `reason` | A priority boost was applied |
-| `decision:budget` | debug | `hook`, `skill`, `reason` | A skill was dropped/summarized for budget |
-| `decision:suppress` | debug | `hook`, `skill`, `reason` | A skill was suppressed (e.g., noneOf) |
-| `tsx-edit-count` | debug | `count`, `threshold` | Current TSX edit counter |
-| `tsx-review-triggered` | debug | `count` | TSX review threshold reached |
-| `tsx-review-not-fired` | debug | `count`, `threshold`, `reason` | TSX review not triggered |
-| `dev-server-verify-triggered` | debug | `command`, `iteration` | Dev server detection fired |
-| `dev-server-verify-not-fired` | debug | `reason`, `iteration` | Dev server detection skipped |
-| `pattern:test` | trace | `skill`, `patternType`, `pattern`, `input`, `matched` | Individual pattern evaluation |
-
-#### UserPromptSubmit (`user-prompt-submit-skill-inject`)
-
-| Event | Level | Payload Fields | Description |
-|-------|-------|---------------|-------------|
-| `complete` | summary | `reason`, `matchedCount`, `injectedCount`, `dedupedCount`, `cappedCount`, `elapsed_ms`, `timing_ms` | End-of-hook summary |
-| `stdin-empty` | debug | — | No stdin received |
-| `prompt-too-short` | debug | `length` | Prompt under 10 chars |
-| `input-parsed` | debug | `sessionId`, `cwd`, `promptLength` | Stdin successfully parsed |
-| `normalized-prompt-empty` | debug | — | Prompt empty after normalization |
-| `prompt-matches` | debug | `totalWithSignals`, `matched` (array of `{skill, score}`) | Skills that met minScore |
-| `prompt-dedup` | debug | `rankedSkills`, `droppedByCap`, `previouslyInjected` | Post-dedup skill list |
-| `prompt-selection` | debug | `selectedSkills`, `droppedByCap`, `droppedByBudget`, `dedupStrategy`, `filteredByDedup`, `budgetBytes`, `timingMs` | Final selection |
-| `decision:troubleshooting_intent_routed` | debug | `intent`, `skills`, `reason` | Troubleshooting classifier matched |
-| `decision:verification_family_suppressed` | debug | `reason` | Test framework detected, verification skills suppressed |
-| `decision:investigation_intent_detected` | debug | `skills` (array of `{skill, score}`) | Investigation intent detected |
-| `decision:companion_selected` | debug | `skill`, `companion`, `reason` | Investigation companion chosen |
-| `prompt:score` | debug | `skill`, `score`, `breakdown` | Scoring breakdown for a skill |
-| `prompt-signal-eval` | trace | `skill`, `matched`, `score`, `reason` | Per-skill signal evaluation |
-| `prompt-analysis-full` | trace | (full `PromptAnalysisReport`) | Complete analysis report |
-
-#### PostToolUse (`posttooluse-verification-observe`)
-
-| Event | Level | Payload Fields | Description |
-|-------|-------|---------------|-------------|
-| `verification.boundary_observed` | summary | `boundary`, `verificationId`, `command`, `matchedPattern`, `inferredRoute`, `timestamp` | Bash command classified as verification boundary |
-| `complete` | summary | `reason`, `matchedCount`, `injectedCount` | End-of-hook summary |
-| `verification-observe-skip` | debug | `reason`, `command` | No boundary match or no bash input |
-
-#### PostToolUse (`posttooluse-validate`)
-
-| Event | Level | Payload Fields | Description |
-|-------|-------|---------------|-------------|
-| `posttooluse-validate-output` | summary | `filePath`, `matchedSkills`, `errorCount`, `warnCount` | Validation produced output |
-| `complete` | summary | `reason`, `matchedCount`, `injectedCount` | End-of-hook summary |
-| `posttooluse-validate-skip` | debug | `reason`, `toolName`, `filePath`, `hash`, `sessionId` | Validation skipped (various reasons) |
-| `posttooluse-validate-input` | debug | `toolName`, `filePath`, `sessionId` | Input parsed |
-| `posttooluse-validate-loaded` | debug | `totalSkills`, `skillsWithRules` | Validation rules loaded |
-| `posttooluse-validate-matched` | debug | `matchedSkills` | Skills matched for validation |
-| `posttooluse-validate-violations` | debug | `total`, `errors`, `warns` | Violation counts |
-| `posttooluse-validate-no-output` | debug | `reason` | No actionable violations |
-| `posttooluse-validate-match` | trace | `skill`, `matchType`, `pattern` | Individual skill-file match |
-| `posttooluse-validate-rule-skip` | trace | `skill`, `pattern`, `reason` | Rule skipped (skipIfFileContains matched) |
-| `posttooluse-validate-regex-fail` | debug | `skill`, `pattern` | Validation regex failed to compile |
-
-#### SubagentStart (`subagent-start-bootstrap`)
-
-| Event | Level | Payload Fields | Description |
-|-------|-------|---------------|-------------|
-| `subagent-start-bootstrap:complete` | summary | `agent_id`, `agent_type`, `claimed_skills`, `budget_used`, `budget_max`, `budget_category`, `pending_launch_matched` | Bootstrap finished |
-| `subagent-start-bootstrap` | debug | `agentId`, `agentType`, `sessionId` | Bootstrap started |
-| `subagent-start-bootstrap:profile-cache-hit` | debug | `sessionId`, `skills` | Profiler cache found |
-| `subagent-start-bootstrap:profile-cache-miss` | debug | `sessionId` | Profiler cache not found |
-| `subagent-start-bootstrap:prompt-skill-match` | debug | `promptLength`, `matchedSkills` | Prompt scored against skill signals |
-| `subagent-start-bootstrap:pending-launch` | debug | `sessionId`, `agentType`, `claimedLaunch`, `promptMatchedSkills`, `likelySkills` | Pending launch routing result |
-| `subagent-start-bootstrap:dedup-claims` | debug | `sessionId`, `agentId`, `scopeId`, `claimed` | Skills claimed for subagent scope |
-
-#### SubagentStop (`subagent-stop-sync`)
-
-| Event | Level | Payload Fields | Description |
-|-------|-------|---------------|-------------|
-| `subagent-stop-sync:complete` | summary | `agent_id`, `agent_type`, `skills_injected`, `ledger_entry_written` | Stop sync finished |
-| `subagent-stop-sync` | debug | `sessionId`, `agentId`, `agentType` | Stop sync started |
-
-#### PreToolUse (`pretooluse-subagent-spawn-observe`)
-
-| Event | Level | Payload Fields | Description |
-|-------|-------|---------------|-------------|
-| `pretooluse-subagent-spawn-observe-recorded` | debug | `sessionId`, `subagentType`, `name` | Pending launch recorded |
-| `pretooluse-subagent-spawn-observe-skip` | debug | `reason`, `toolName` | Observation skipped |
-
-#### Issue Codes
-
-Issue events (emitted at `summary` level via `logger.issue()`) use these codes:
-
-| Code | Hook | Meaning |
-|------|------|---------|
-| `STDIN_EMPTY` | PreToolUse | No data on stdin |
-| `STDIN_PARSE_FAIL` | PreToolUse | stdin is not valid JSON |
-| `SKILLMD_PARSE_FAIL` | PreToolUse | SKILL.md YAML frontmatter failed to parse |
-| `SKILLMAP_VALIDATE_FAIL` | PreToolUse | Skill map validation found errors |
-| `SKILLMAP_LOAD_FAIL` | PreToolUse | Could not load skill map from manifest or filesystem |
-| `SKILLMAP_EMPTY` | PreToolUse | Skill map loaded but contains zero skills |
-| `PATH_REGEX_COMPILE_FAIL` | PreToolUse | Pre-compiled path regex failed to construct |
-| `BASH_REGEX_COMPILE_FAIL` | PreToolUse | Pre-compiled bash regex failed to construct |
-| `IMPORT_REGEX_COMPILE_FAIL` | PreToolUse | Pre-compiled import regex failed to construct |
-| `PATH_GLOB_INVALID` | PreToolUse | Path glob pattern is invalid |
-| `BASH_REGEX_INVALID` | PreToolUse | Bash regex pattern is invalid |
-| `IMPORT_PATTERN_INVALID` | PreToolUse | Import pattern is invalid |
-| `DEDUP_CLAIM_FAIL` | PreToolUse, UserPromptSubmit | Could not create claim file (permissions, disk full) |
-
-For full error descriptions with common causes, fix steps, and verification commands, see [04-operations-debugging.md § Error Catalog](./04-operations-debugging.md#error-catalog). For symptom-based troubleshooting (skill not injecting, prompt signal mismatch, wrong subagent context, audit log issues, manifest drift), see [04-operations-debugging.md § Troubleshooting Playbooks](./04-operations-debugging.md#symptom-based-troubleshooting-playbooks).
-
-### End-to-End Trace: Single Request Flow
-
-This diagram shows every log event emitted during a single PreToolUse invocation where one skill is injected, from stdin parse through stdout write:
-
-```mermaid
-sequenceDiagram
- participant SDK as Claude Agent SDK
- participant Hook as pretooluse-skill-inject
- participant Log as stderr (JSONL)
- participant Audit as Audit Log File
- participant FS as Dedup Claim Dir
-
- SDK->>Hook: stdin JSON (tool_name, tool_input, session_id)
-
- Hook->>Log: debug: "input-parsed" {toolName, sessionId, cwd}
- Hook->>Log: debug: "tool-target" {toolName, target}
- Hook->>Log: debug: "manifest-loaded" {path, version}
- Hook->>Log: debug: "skillmap-loaded" {skillCount: 46}
- Hook->>Log: debug: "likely-skills" {skills: [...]}
- Hook->>Log: debug: "dedup-strategy" {strategy: "file"}
-
- loop For each skill pattern
- Hook->>Log: trace: "pattern:test" {skill, patternType, matched}
- end
-
- Hook->>Log: debug: "decision:match" {skill: "nextjs", reason: "pathPattern"}
- Hook->>Log: debug: "decision:boost" {skill: "nextjs", reason: "profiler +5"}
- Hook->>Log: debug: "decision:dedup" {skill: "ai-sdk", reason: "already seen"}
-
- Hook->>FS: tryClaimSessionKey("nextjs")
- FS-->>Hook: true (claimed)
-
- Hook->>Audit: append {event: "skill-injection", injectedSkills: ["nextjs"]}
- Hook->>Log: summary: "complete" {reason: "injected", injectedCount: 1, elapsed_ms: 8}
-
- Hook->>SDK: stdout JSON {hookSpecificOutput: {additionalContext: "..."}}
-```
-
----
-
-## Subagent Lifecycle
-
-When Claude spawns a subagent (via the Agent tool), four hooks coordinate to transfer context, prevent re-injection waste, and record observability data.
-
-```mermaid
-sequenceDiagram
- participant Lead as Lead Agent
- participant Observe as spawn-observe
- participant Bootstrap as subagent-start-bootstrap
- participant Sub as Subagent
- participant Sync as subagent-stop-sync
-
- Lead->>Observe: PreToolUse (Agent tool call)
- Note right of Observe: Record pending launch<br/>(description, prompt, subagent_type)<br/>→ pending-launches.jsonl
-
- Lead->>Bootstrap: SubagentStart fires
- Bootstrap->>Bootstrap: claimPendingLaunch(sessionId, agentType)
- Note right of Bootstrap: Match oldest pending launch<br/>Score prompt against skill signals<br/>Merge with profiler likelySkills
- Bootstrap->>Sub: Inject additionalContext<br/>(profiler + skills, budget-scaled)
- Note right of Sub: Runs with own dedup scope<br/>(scoped claim dir by agent_id)
-
- Sub-->>Sub: Own PreToolUse/PostToolUse cycle
-
- Sub->>Sync: SubagentStop fires
- Note right of Sync: Append to session ledger<br/>(agent_id, type, skills_injected)
-```
-
-### Spawn Observation (PreToolUse → Agent)
-
-**Source**: `hooks/src/pretooluse-subagent-spawn-observe.mts`
-
-Intercepts Agent tool calls and records metadata for later correlation:
-
-```typescript
-interface PendingLaunch {
- description: string;
- prompt: string;
- subagent_type: string;
- resume?: string;
- name?: string;
- createdAt: number;
-}
-```
-
-Storage: `<os.tmpdir()>/vercel-plugin-<sessionId>-pending-launches.jsonl`
-
-Uses file-based locking (`.lock` file with 2s wait timeout, 10ms polling, 5s stale-lock clearance) and atomic write via temp file + rename.
-
-### Bootstrap Injection (SubagentStart)
-
-**Source**: `hooks/src/subagent-start-bootstrap.mts`
-
-Injects project context into spawned subagents, **scaled by agent type budget**:
-
-| Agent Type | Budget | Content |
-|-----------|--------|---------|
-| **Explore** | ~1 KB | Project profile + skill name list only |
-| **Plan** | ~3 KB | Profile + skill summaries + deployment constraints |
-| **general-purpose** | ~8 KB | Profile + top skills with full SKILL.md bodies |
-| Other/custom | ~8 KB | Treated as general-purpose |
-
-**Context assembly**:
-1. Read cached profiler results (`profile.json`) — fall back to `VERCEL_PLUGIN_LIKELY_SKILLS`
-2. Claim pending launch via `claimPendingLaunch(sessionId, agentType)` — matches against pending records and scores the launch prompt against skill `promptSignals`
-3. Merge profiler skills + prompt-matched skills (prompt scores highest, deduplicated)
-4. Build wrapped context with explicit start/end delimiter markers
-5. Persist dedup claims scoped by `agentId` via `tryClaimSessionKey()`
-
-### Stop Sync and Ledger (SubagentStop)
-
-**Source**: `hooks/src/subagent-stop-sync.mts`
-
-Records subagent metadata to a session-scoped JSONL ledger:
-
-**Ledger file**: `<os.tmpdir()>/vercel-plugin-<sessionId>-subagent-ledger.jsonl`
-
-```json
-{
- "timestamp": "2026-03-10T12:05:00.000Z",
- "session_id": "abc-123",
- "agent_id": "def-456",
- "agent_type": "general-purpose",
- "agent_transcript_path": "/path/to/transcript"
-}
-```
-
-Also counts injected skills by reading the scoped claim dir (`listSessionKeys(sessionId, "seen-skills", agentId)`) and logs `skills_injected` as a summary metric.
-
-### Fresh-Env Behavior
-
-Subagents spawned in a fresh environment (no inherited `VERCEL_PLUGIN_SEEN_SKILLS`) fall back to file-based dedup:
-
-- Lead agent uses env var (comma-delimited string) as primary dedup state
-- Subagent with empty/missing env var reads the **claim directory** directly
-- Subagent dedup scope is isolated by `agentId` — sibling subagents and the parent each have independent claim dirs
-
-This means a subagent can re-inject skills that the lead agent already injected, which is intentional: subagents need their own context and should not be starved of skills just because the parent saw them first.
-
----
-
-## Verification Observer
-
-**Source**: `hooks/src/posttooluse-verification-observe.mts`
-
-An observer hook (PostToolUse on Bash) that classifies completed bash commands into verification boundaries and emits structured log events. It does **not** modify tool output — it is purely observational.
-
-### Boundary Classification
-
-Eight pattern groups map bash commands to four boundary types:
-
-| Boundary | Matched Patterns | Examples |
-|----------|-----------------|----------|
-| `uiRender` | browser, screenshot, puppeteer, playwright, chromium, firefox, webkit, `open http://…` | `npx playwright test`, `open https://localhost:3000` |
-| `clientRequest` | curl, wget, httpie, `fetch(`, `npx undici` | `curl http://localhost:3000/api`, `wget https://…` |
-| `serverHandler` | tail/less/cat on `.log`, `tail -f`, `journalctl -f`, vercel logs/inspect, lsof/netstat/ss on ports | `tail -f app.log`, `vercel logs project`, `lsof -i :3000` |
-| `environment` | printenv, env, `echo $VAR`, `vercel env`, `cat .env`, `node -e process.env` | `printenv DATABASE_URL`, `vercel env pull` |
-| `unknown` | *(no pattern matched)* | `git status`, `npm install` |
-
-### Route Inference
-
-Routes are inferred in priority order:
-
-1. **From recent edits** (`VERCEL_PLUGIN_RECENT_EDITS` env var): Extracts routes from file paths like `app/settings/page.tsx` → `/settings`. Strips file suffixes (`page`, `route`, `layout`, `loading`, `error`) and converts `[id]` to `:id`.
-2. **From URLs in command**: Parses `http://localhost:3000/api/users` → `/api/users`
-3. Returns `null` if no route found
-
-### Event Structure
-
-Emitted at `summary` log level:
-
-```json
-{
- "event": "verification.boundary_observed",
- "boundary": "clientRequest",
- "verificationId": "uuid-v4",
- "command": "curl http://localhost:3000/api/users",
- "matchedPattern": "curl/wget/httpie",
- "inferredRoute": "/api/users",
- "timestamp": "2026-03-10T12:03:00.000Z"
-}
-```
-
-The `verificationId` (UUIDv4) enables correlation of boundary observations with other session events.
-
----
-
-## Post-Write Validation
-
-**Source**: `hooks/src/posttooluse-validate.mts`
-
-Runs skill-defined validation rules against files after Write/Edit operations.
-
-### Validation Pipeline
-
-```
-Parse Input → Load Rules → Match File → Run Validation → Format Output
-```
-
-1. **Parse input**: Extract `toolName`, `filePath`, `sessionId`, `cwd` from hook stdin. Skip if not Write/Edit or no `file_path`.
-2. **Load rules**: Scan all skills' `validate:` arrays from SKILL.md frontmatter. Compile path and import patterns.
-3. **Match file**: Test file path against skill `pathPatterns` (glob) and file content against `importPatterns` (regex). Returns list of matched skills with validation rules.
-4. **Run validation**: For each matched skill and rule:
- - Check `skipIfFileContains` (soft skip if file content matches this regex)
- - Compile rule `pattern` to RegExp with `global` flag
- - Test each line of file content
- - Collect violations: `{ skill, line, message, severity, matchedText }`
-5. **Format output**: Error-severity violations → `additionalContext` with fix instructions. Warn-severity → suggestions in debug mode only. Grouped by skill.
-
-**Validation rule schema** (from SKILL.md frontmatter):
-```yaml
-validate:
- - pattern: "hardcoded-secret-regex"
- message: "Do not hardcode secrets; use environment variables"
- severity: "error"
- skipIfFileContains: "process\\.env"
-```
-
-### File-Hash Dedup
-
-Prevents re-validating unchanged files:
-- Computes MD5 hash of file content (first 12 chars)
-- Tracks validated `path:hash` pairs in `VERCEL_PLUGIN_VALIDATED_FILES` env var and session file (`…-validated-files.txt`)
-- Skips validation if the same `path:hash` pair was already validated in this session
-
----
-
-## Session Cleanup
-
-**Source**: `hooks/src/session-end-cleanup.mts`
-
-At SessionEnd, the cleanup hook deletes all session-scoped temp files:
-
-```typescript
-const prefix = `vercel-plugin-${tempSessionIdSegment(sessionId)}-`
-// Glob tmpdir for entries starting with prefix
-// Directories (*.d, *-pending-launches) → rmSync({ recursive: true, force: true })
-// Files → unlinkSync()
-```
-
-Cleanup is **best-effort** — all errors are silently ignored. The hook always exits 0.
-
----
-
-## Temp File Inventory
-
-All session-scoped files live in `os.tmpdir()` with the prefix `vercel-plugin-<sessionId>-`:
-
-| File/Dir | Format | Purpose | Created By |
-|----------|--------|---------|------------|
-| `…-seen-skills.d/` | Dir of empty files | Atomic dedup claims | `tryClaimSessionKey()` |
-| `…-seen-skills.txt` | Comma-delimited | Dedup snapshot (synced from claims) | `syncSessionFileFromClaims()` |
-| `…-<agentId>-seen-skills.d/` | Dir of empty files | Scoped subagent dedup claims | `subagent-start-bootstrap` |
-| `…-<agentId>-seen-skills.txt` | Comma-delimited | Scoped subagent dedup snapshot | `subagent-start-bootstrap` |
-| `…-profile.json` | JSON | Cached profiler results | `session-start-profiler` |
-| `…-pending-launches.jsonl` | JSONL | Pending subagent spawn metadata | `pretooluse-subagent-spawn-observe` |
-| `…-pending-launches.jsonl.lock` | Lock file | File-based lock for pending launches | `pretooluse-subagent-spawn-observe` |
-| `…-subagent-ledger.jsonl` | JSONL | Aggregate subagent stop metadata | `subagent-stop-sync` |
-| `…-validated-files.txt` | Comma-delimited | Validated file:hash pairs | `posttooluse-validate` |
-
-All entries are cleaned up by `session-end-cleanup.mjs` at SessionEnd.
diff --git a/docs/README.md b/docs/README.md
deleted file mode 100644
index 2acbf95..0000000
--- a/docs/README.md
+++ /dev/null
@@ -1,185 +0,0 @@
-# Vercel Plugin for Claude Code — Documentation
-
-The Vercel Plugin is an **event-driven skill injection system** for Claude Code. It automatically detects what a developer is working on — by watching file operations, bash commands, imports, and prompt text — and injects precisely the right Vercel platform knowledge into Claude's context window, without any manual configuration. The plugin manages **46 skills** covering the full Vercel ecosystem (Next.js, AI SDK, Functions, Storage, Deployments, and more), delivered through a lifecycle of hooks that fire at key moments during a Claude Code session.
-
----
-
-## Table of Contents
-
-| # | Section | Audience | What You'll Learn |
-|---|---------|----------|-------------------|
-| 1 | [Architecture Overview](./01-architecture-overview.md) | Everyone | System diagram, core concepts, hook lifecycle sequence, complete hook inventory, data flow from SKILL.md to injection, glossary |
-| 2 | [Injection Pipeline Deep-Dive](./02-injection-pipeline.md) | Plugin users | Pattern matching mechanics, ranking algorithm, budget enforcement, prompt signal scoring, dedup system, special triggers |
-| 3 | [Skill Authoring Guide](./03-skill-authoring.md) | Skill authors | Step-by-step tutorial for creating a new skill, frontmatter reference, validation rules, template include engine |
-| 4 | [Operations & Debugging](./04-operations-debugging.md) | Maintainers | Environment variable tuning, log levels, `doctor`/`explain` CLI tools, dedup troubleshooting, debugging decision tree |
-| 5 | [Reference](./05-reference.md) | All | Complete hook registry table, env var reference, SKILL.md frontmatter spec, YAML parser edge cases, full skill catalog, budget constants |
-
-**Additional guides:**
-
-- [Architecture Patterns](./architecture.md) — detailed architecture and design patterns
-- [Developer Guide](./developer-guide.md) — developer workflow and setup
-- [Skill Authoring (extended)](./skill-authoring.md) — comprehensive skill creation reference
-- [Hook Lifecycle](./hook-lifecycle.md) — complete hook execution sequence with timing details
-- [Skill Injection](./skill-injection.md) — pattern matching, ranking, and budget mechanics
-- [CLI Reference](./cli-reference.md) — `explain` and `doctor` command usage
-- [Glossary](./glossary.md) — definitions of 25+ project-specific terms
-- [Observability Guide](./observability.md) — log levels, structured logging, audit logs, and dedup debugging
-
----
-
-## Quick Start for New Contributors
-
-```bash
-# 1. Clone the repository
-git clone <repository-url> vercel-plugin
-cd vercel-plugin
-
-# 2. Install dependencies (requires Bun)
-bun install
-
-# 3. Build everything (hooks + manifest + from-skills templates)
-bun run build
-
-# 4. Run the full test suite (typecheck + 32 test files)
-bun test
-
-# 5. Validate skill structure and manifest parity
-bun run validate
-
-# 6. Self-diagnosis (manifest parity, hook timeouts, dedup health)
-bun run doctor
-
-# 7. See which skills match a file or command
-bun run explain app/api/route.ts
-bun run explain "vercel deploy --prod"
-```
-
-**Day-to-day workflow:**
-
-| Task | Command |
-|------|---------|
-| Edit a hook source file (`.mts`) | `bun run build:hooks` (auto-runs on pre-commit) |
-| Edit a skill's SKILL.md | `bun run build:manifest` to regenerate the manifest |
-| Edit a `.md.tmpl` template | `bun run build:from-skills` to recompile |
-| Run a single test | `bun test tests/<name>.test.ts` |
-| Update golden snapshots | `bun run test:update-snapshots` |
-| Typecheck only | `bun run typecheck` |
-
----
-
-## How It Works (30-Second Version)
-
-```
-Developer opens Claude Code in a Next.js project
- ↓
-SessionStart hooks scan the project → identify likely skills (nextjs, ai-sdk, ...)
- ↓
-Developer types: "Add a cron job for weekly emails"
- ↓
-UserPromptSubmit hook scores prompt → injects cron-jobs skill
- ↓
-Claude reads vercel.json
- ↓
-PreToolUse hook matches file path → injects relevant config skills
- ↓
-Claude writes app/api/cron/route.ts
- ↓
-PostToolUse hook validates the written file against skill rules
- ↓
-SessionEnd hook cleans up temp files
-```
-
-All of this happens transparently. The developer gets expert Vercel guidance without asking for it.
-
----
-
-## Skill Catalog by Category
-
-The plugin ships 46 skills organized into 10 categories. Each skill is a self-contained `skills/<skill-name>/SKILL.md` file with YAML frontmatter (patterns, priority, validation rules) and a markdown body (the knowledge injected into Claude's context).
-
-```mermaid
-mindmap
- root((46 Skills))
- Frameworks & Bundlers
- nextjs
- turbopack
- turborepo
- ncc
- next-forge
- micro
- AI & LLM Services
- ai-sdk
- ai-gateway
- ai-generation-persistence
- ai-elements
- chat-sdk
- vercel-agent
- workflow
- UI & Design
- geist
- shadcn
- react-best-practices
- v0-dev
- satori
- json-render
- Storage & Data
- vercel-storage
- swr
- cms
- Infrastructure & Deployment
- vercel-functions
- vercel-sandbox
- deployments-cicd
- vercel-cli
- vercel-api
- vercel-queues
- cron-jobs
- bootstrap
- marketplace
- Networking & Security
- routing-middleware
- runtime-cache
- vercel-firewall
- vercel-flags
- Auth & Identity
- auth
- sign-in-with-vercel
- email
- payments
- Observability
- observability
- env-vars
- Testing & Verification
- agent-browser
- agent-browser-verify
- verification
- investigation-mode
-```
-
----
-
-## Key Files
-
-| File | Purpose |
-|------|---------|
-| `hooks/hooks.json` | Hook registry — all lifecycle event bindings |
-| `generated/skill-manifest.json` | Pre-compiled skill index (glob→regex, frontmatter) |
-| `skills/*/SKILL.md` | Skill definitions (YAML frontmatter + markdown body) |
-| `hooks/src/*.mts` | Hook source code (TypeScript, compiled to `.mjs`) |
-| `CLAUDE.md` | Developer quick-reference guide |
-
----
-
-## Glossary
-
-| Term | Definition |
-|------|-----------|
-| **Hook** | A TypeScript function registered in `hooks/hooks.json` that fires on a specific Claude Code lifecycle event (`SessionStart`, `PreToolUse`, `UserPromptSubmit`, `PostToolUse`, `SessionEnd`). Hooks are the injection engine — they decide *what* knowledge Claude receives and *when*. |
-| **Skill** | A self-contained knowledge module in `skills/<skill-name>/SKILL.md`. Each skill has YAML frontmatter (defining when to inject) and a markdown body (the content injected into Claude's context). Skills are the unit of domain knowledge. |
-| **Injection** | The act of inserting a skill's markdown body into Claude's `additionalContext` during a hook invocation. Injection is gated by pattern matching, priority ranking, dedup checks, and budget limits. |
-| **Dedup** | The deduplication system that prevents the same skill from being injected more than once per session. Uses a three-layer state merge: atomic file claims (`O_EXCL`), an env var (`VERCEL_PLUGIN_SEEN_SKILLS`), and a session file — all unioned by `mergeSeenSkillStates()`. |
-| **Claim** | An atomic file created in the claim directory (`<os.tmpdir()>/vercel-plugin-<sessionId>-seen-skills.d/`) to mark a skill as already injected. Created with `openSync(path, "wx")` (O_EXCL) to guarantee exactly-once semantics even under concurrent hook invocations. |
-| **Budget** | The maximum byte size of skill content that can be injected in a single hook invocation. PreToolUse allows up to **3 skills / 18 KB**; UserPromptSubmit allows up to **2 skills / 8 KB**. If a skill's body exceeds the remaining budget, its `summary` field is injected as a compact fallback. |
-| **Profiler** | The `session-start-profiler` hook that runs at session startup. It scans `package.json` dependencies, config files (`vercel.json`, `next.config.*`, etc.), and project structure to pre-identify *likely skills*, giving them a **+5 priority boost** in subsequent ranking. |
-| **Greenfield** | A project state detected by the profiler when the working directory is empty or has no meaningful source files. In greenfield mode, the `bootstrap` skill is automatically prioritized to help scaffold a new project. |
-| **Manifest** | The pre-compiled skill index at `generated/skill-manifest.json`. Built by `scripts/build-manifest.ts`, it converts glob patterns to regex at build time so hooks can match file paths without parsing SKILL.md files at runtime. Version 2 format with paired arrays (`pathPatterns` ↔ `pathRegexSources`). |
diff --git a/docs/architecture.md b/docs/architecture.md
deleted file mode 100644
index 62242e8..0000000
--- a/docs/architecture.md
+++ /dev/null
@@ -1,574 +0,0 @@
-# Architecture Overview
-
-## What Is vercel-plugin?
-
-**vercel-plugin** is a **hook-driven context router** for Claude Code. It solves a fundamental problem in AI-assisted development: the agent needs domain knowledge to be useful, but has a finite context window.
-
-Two failure modes arise without it:
-
-| Failure Mode | What Happens | Effect |
-|---|---|---|
-| **Too much context** | Every skill is injected upfront | The context window fills with irrelevant instructions; the agent loses focus and makes mistakes |
-| **Too little context** | No skills are injected | The agent lacks Vercel-specific knowledge; it hallucinates APIs, skips best practices, and produces broken deployments |
-
-vercel-plugin threads the needle by **injecting only the right skills at the right time**, driven by what the developer is actually doing — the files they touch, the commands they run, and the questions they ask.
-
----
-
-## High-Level System Architecture
-
-```mermaid
-flowchart TB
- subgraph Claude["Claude Code Runtime"]
- direction TB
- Agent["Agent (LLM)"]
- Tools["Tool Calls
(Read, Edit, Write, Bash, Agent)"]
- Context["Context Window"]
- end
-
- subgraph Plugin["vercel-plugin Hook System"]
- direction TB
- HooksJSON["hooks.json
(12 hook registrations)"]
- Manifest["skill-manifest.json
(pre-compiled patterns)"]
- Skills["skills//SKILL.md
(46 skill definitions)"]
- VercelMD["vercel.md
(ecosystem graph, ~52KB)"]
- end
-
- subgraph Lifecycle["Hook Lifecycle Phases"]
- direction LR
- Startup["Startup
3 hooks"]
- Runtime["Runtime
3 hooks"]
- PostAction["Post-Action
3 hooks"]
- Subagent["Subagent
2 hooks"]
- Teardown["Teardown
1 hook"]
- end
-
- Agent -->|"triggers"| Tools
- Tools -->|"fires hooks"| Plugin
- Plugin -->|"additionalContext"| Context
- Context -->|"informs"| Agent
- HooksJSON --> Lifecycle
-
- style Claude fill:#0a0a1a,stroke:#333,color:#e0e0e0
- style Plugin fill:#1a1a2e,stroke:#333,color:#e0e0e0
- style Lifecycle fill:#16213e,stroke:#333,color:#e0e0e0
-```
-
-### How It Works
-
-1. **Claude Code** registers all hooks from `hooks/hooks.json` at startup
-2. When the agent takes an action (reads a file, runs a command, submits a prompt), Claude Code fires the corresponding hook(s)
-3. Each hook receives JSON on stdin describing the action, evaluates pattern matches, and decides whether to inject skill content
-4. Matched skills are returned as `additionalContext` in the hook's JSON stdout, which Claude Code appends to the agent's context for the current turn
-
----
-
-## Hook Registration Flow
-
-All 12 hooks are declared in `hooks/hooks.json`. Each entry maps a lifecycle event + regex matcher to a Node.js command with an optional timeout.
-
-```mermaid
-flowchart TB
- subgraph Startup ["SessionStart Phase"]
- SS1["session-start-seen-skills.mjs
Initializes dedup state"]
- SS2["session-start-profiler.mjs
Scans project → sets LIKELY_SKILLS"]
- SS3["inject-claude-md.mjs
Outputs vercel.md ecosystem graph"]
- end
-
- subgraph Runtime ["Runtime Phase"]
- PTU["pretooluse-skill-inject.mjs
Read | Edit | Write | Bash
Main injection engine"]
- PSO["pretooluse-subagent-spawn-observe.mjs
Agent
Records pending subagent launches"]
- UPS["user-prompt-submit-skill-inject.mjs
All prompts
Prompt signal scoring"]
- end
-
- subgraph PostAction ["Post-Action Phase"]
- PTV["posttooluse-validate.mjs
Write | Edit
Skill validation rules"]
- PTF["posttooluse-shadcn-font-fix.mjs
Bash
Shadcn font fix"]
- PVO["posttooluse-verification-observe.mjs
Bash
Verification boundary observer"]
- end
-
- subgraph SubagentLC ["Subagent Lifecycle"]
- SAB["subagent-start-bootstrap.mjs
Any subagent
Injects context by agent type budget"]
- SAS["subagent-stop-sync.mjs
Any subagent
Writes ledger + syncs dedup state"]
- end
-
- subgraph Teardown ["Teardown Phase"]
- SEC["session-end-cleanup.mjs
Deletes all temp files"]
- end
-
- Startup --> Runtime
- Runtime --> PostAction
- PostAction --> SubagentLC
- SubagentLC --> Teardown
-
- style Startup fill:#1a1a2e,stroke:#16213e,color:#e0e0e0
- style Runtime fill:#16213e,stroke:#0f3460,color:#e0e0e0
- style PostAction fill:#0f3460,stroke:#533483,color:#e0e0e0
- style SubagentLC fill:#533483,stroke:#e94560,color:#e0e0e0
- style Teardown fill:#e94560,stroke:#e94560,color:#e0e0e0
-```
-
-### Hook Registry Table
-
-| # | Event | Hook File | Matcher | Timeout | Purpose |
-|---|-------|-----------|---------|---------|---------|
-| 1 | SessionStart | `session-start-seen-skills.mjs` | `startup\|resume\|clear\|compact` | — | Initialize dedup env var |
-| 2 | SessionStart | `session-start-profiler.mjs` | `startup\|resume\|clear\|compact` | — | Profile project, set LIKELY_SKILLS |
-| 3 | SessionStart | `inject-claude-md.mjs` | `startup\|resume\|clear\|compact` | — | Inject vercel.md ecosystem graph |
-| 4 | PreToolUse | `pretooluse-skill-inject.mjs` | `Read\|Edit\|Write\|Bash` | 5s | Main skill injection engine |
-| 5 | PreToolUse | `pretooluse-subagent-spawn-observe.mjs` | `Agent` | 5s | Record pending subagent launches |
-| 6 | UserPromptSubmit | `user-prompt-submit-skill-inject.mjs` | _(all prompts)_ | 5s | Prompt signal scoring + injection |
-| 7 | PostToolUse | `posttooluse-shadcn-font-fix.mjs` | `Bash` | 5s | Fix shadcn font loading |
-| 8 | PostToolUse | `posttooluse-verification-observe.mjs` | `Bash` | 5s | Observe verification boundaries |
-| 9 | PostToolUse | `posttooluse-validate.mjs` | `Write\|Edit` | 5s | Run skill validation rules |
-| 10 | SubagentStart | `subagent-start-bootstrap.mjs` | `.+` _(any)_ | 5s | Bootstrap subagent with context |
-| 11 | SubagentStop | `subagent-stop-sync.mjs` | `.+` _(any)_ | 5s | Write ledger, sync dedup |
-| 12 | SessionEnd | `session-end-cleanup.mjs` | — | — | Delete temp files |
-
----
-
-## Skill Injection Pipeline
-
-This is the core data flow — how the plugin decides which skills to inject and in what order.
-
-```mermaid
-flowchart LR
- subgraph Signals ["Signal Collection"]
- FP["File path globs"]
- BC["Bash command regex"]
- IP["Import patterns"]
- PS["Prompt phrases"]
- VC["vercel.json keys"]
- PR["Profiler likely-skills"]
- end
-
- subgraph Scoring ["Score & Rank"]
- PM["Pattern Match
base priority 4-8"]
- VCR["vercel.json routing
+-10 priority"]
- PB["Profiler boost
+5 priority"]
- SM["+50 setup mode boost"]
- TX["+40 TSX review boost"]
- RK["Rank by
final priority"]
- end
-
- subgraph Filtering ["Filter & Budget"]
- DD["Dedup check
(claim dir + env var + session file)"]
- BG["Budget gate
PreToolUse: 3 skills / 18KB
PromptSubmit: 2 skills / 8KB"]
- SF["Summary fallback
(over budget -> short summary)"]
- end
-
- subgraph Output ["Injection"]
- AC["additionalContext
-> Claude's context window"]
- end
-
- Signals --> Scoring
- Scoring --> Filtering
- Filtering --> Output
-
- FP --> PM
- BC --> PM
- IP --> PM
- PS --> PM
- VC --> VCR
- PR --> PB
-
- PM --> RK
- VCR --> RK
- PB --> RK
- SM --> RK
- TX --> RK
-
- RK --> DD
- DD --> BG
- BG --> SF
- SF --> AC
- BG --> AC
-
- style Signals fill:#1a1a2e,stroke:#333,color:#e0e0e0
- style Scoring fill:#16213e,stroke:#333,color:#e0e0e0
- style Filtering fill:#0f3460,stroke:#333,color:#e0e0e0
- style Output fill:#533483,stroke:#333,color:#e0e0e0
-```
-
-### Pipeline Walkthrough
-
-1. **Signal Collection** — When the agent performs an action (opens a file, runs a command, submits a prompt), the relevant hook extracts signals: file paths are matched against glob patterns, bash commands against regex, imports against package names, and prompt text against phrase/allOf/anyOf scoring.
-
-2. **Score & Rank** — Each matched skill starts with its base `priority` (typically 4-8). Multiple boosters can raise it:
-
- | Booster | Value | Source |
- |---------|-------|--------|
- | Profiler | +5 | `VERCEL_PLUGIN_LIKELY_SKILLS` (detected at session start) |
-  | vercel.json routing | up to ±10 | Keys in project's `vercel.json` |
- | Setup mode | +50 | `VERCEL_PLUGIN_SETUP_MODE=1` (greenfield/bootstrap projects) |
- | TSX review | +40 | After N `.tsx` edits (default 3) |
- | Dev server detect | boost | When dev server patterns appear in bash |
-
-3. **Filter & Budget** — Skills are deduplicated (no skill injects twice per session), then the top candidates are checked against the byte budget. If the full skill body would exceed the budget, the plugin falls back to injecting just the `summary` field instead.
-
-4. **Injection** — Surviving skills are returned as `additionalContext` in the hook's JSON output, which Claude Code appends to the agent's context for the current turn.
-
----
-
-## Dedup Contract
-
-A skill should never be injected twice in the same session. The dedup system uses three redundant sources of truth, merged on every hook invocation.
-
-```mermaid
-flowchart TB
- subgraph Sources ["Three Sources of Truth"]
- CD["Claim Directory
/vercel-plugin--seen-skills.d/
One empty file per skill (atomic O_EXCL create)"]
- SF["Session File
/vercel-plugin--seen-skills.txt
Comma-delimited snapshot"]
- EV["Env Var
VERCEL_PLUGIN_SEEN_SKILLS
Comma-delimited, set via CLAUDE_ENV_FILE"]
- end
-
- MERGE["mergeSeenSkillStates()
Union of all three sources"]
-
- CD --> MERGE
- SF --> MERGE
- EV --> MERGE
-
- MERGE --> CHECK{"Skill already
in merged set?"}
- CHECK -->|Yes| SKIP["Skip injection"]
- CHECK -->|No| CLAIM["Atomic claim + inject"]
- CLAIM --> UPDATE["Update all three sources"]
-```
-
-### Claim Mechanics
-
-- **Atomic claims**: `openSync(path, "wx")` with the `O_EXCL` flag ensures that if two hooks race to claim the same skill, exactly one succeeds and the other gets `EEXIST`.
-- **Session file**: A comma-delimited text file synced from the claim directory. Acts as a fast-read cache.
-- **Env var**: `VERCEL_PLUGIN_SEEN_SKILLS` persists across hook invocations via `CLAUDE_ENV_FILE`. Initialized to `""` by session-start.
-- **State merge**: `mergeSeenSkillStates()` unions all three sources on every hook call, tolerating partial failures.
-- **Scoped claims**: Subagent dedup claims are scoped by `agentId` to prevent sibling subagents from cross-contaminating each other's state.
-
-### Dedup Strategies
-
-The system uses a fallback chain (visible in debug logs):
-
-| Strategy | Mechanism | When Used |
-|----------|-----------|-----------|
-| `file` | Atomic file claims in tmpdir | Default — most reliable |
-| `env-var` | `VERCEL_PLUGIN_SEEN_SKILLS` only | Fallback if tmpdir is unavailable |
-| `memory-only` | In-memory set for single invocation | Fallback if env file is unavailable |
-| `disabled` | No dedup | When `VERCEL_PLUGIN_HOOK_DEDUP=off` |
-
-### Cleanup
-
-`session-end-cleanup.mjs` deletes the claim directory, session files, pending launch dirs, and profile cache when the session ends. If the session crashes, the OS tmpdir cleanup eventually reclaims the files.
-
----
-
-## Prompt Signal Scoring
-
-The `UserPromptSubmit` hook uses a scoring system to match user prompts to skills. Each skill's `promptSignals` frontmatter defines four signal types:
-
-| Signal | Score | Behavior |
-|--------|-------|----------|
-| `phrases` | **+6** each | Exact substring match (case-insensitive). The primary signal. |
-| `allOf` | **+4** per group | All terms in a group must appear. For compound concepts like "deploy" + "preview". |
-| `anyOf` | **+1** each, **capped at +2** | Optional boosters. Broad terms that add confidence. |
-| `noneOf` | **-Infinity** | Hard suppress. If any term matches, the skill is excluded entirely. |
-
-A skill is injected only if its total score meets `minScore` (default: 6). This means a single phrase match is enough, or an allOf group (+4) plus two anyOf matches (+2) = 6.
-
-### Additional Prompt Routing
-
-- **Troubleshooting intent classification**: The prompt hook detects frustration/debug signals and routes to `investigation-mode` + a companion skill (`workflow`, `agent-browser-verify`, or `vercel-cli`).
-- **Test framework suppression**: When a prompt mentions test frameworks, verification-family skills are suppressed to avoid conflicting instructions.
-- **Investigation companion selection**: When `investigation-mode` triggers, the second slot goes to the best-scoring companion from a priority list.
-
----
-
-## Subagent Architecture
-
-When the main agent spawns subagents, the plugin manages their skill context independently.
-
-```mermaid
-sequenceDiagram
- participant Main as Main Agent
- participant PreTool as pretooluse-subagent-spawn-observe
- participant State as Subagent State (tmpdir)
- participant Bootstrap as subagent-start-bootstrap
- participant Sub as Subagent
- participant Sync as subagent-stop-sync
- participant Ledger as Subagent Ledger (JSONL)
-
- Main->>PreTool: Agent tool call (description, prompt, type)
- PreTool->>State: appendPendingLaunch(sessionId, payload)
- Note over State: Records description, prompt, subagent_type
-
- Main->>Sub: Spawn subagent
- Sub->>Bootstrap: SubagentStart event
- Bootstrap->>State: claimPendingLaunch(sessionId, agentType)
- Bootstrap->>Bootstrap: Resolve budget category
- Note over Bootstrap: Explore=1KB, Plan=3KB, general=8KB
- Bootstrap->>Sub: additionalContext (profiler + skills)
-
- Sub->>Sub: Works on task...
-
- Sub->>Sync: SubagentStop event
- Sync->>Ledger: Append JSONL record
- Note over Ledger: timestamp, agent_id, agent_type, transcript_path
-```
-
-### Budget Categories
-
-Subagent context is sized by agent type to avoid wasting context on lightweight agents:
-
-| Agent Type | Budget | Content |
-|------------|--------|---------|
-| `Explore` | 1KB (minimal) | Project profile + skill names only |
-| `Plan` | 3KB (light) | Profile + skill summaries + deployment constraints |
-| `general-purpose` | 8KB (standard) | Profile + full skill bodies |
-| Other/custom | 8KB (standard) | Treated as general-purpose |
-
----
-
-## Skill Structure
-
-The plugin ships 46 skills in `skills/<slug>/SKILL.md`. Each skill is a self-contained markdown document with YAML frontmatter that declares its triggers and metadata:
-
-```yaml
----
-name: skill-slug
-description: "One-line description"
-summary: "Brief fallback injected when budget is exceeded"
-metadata:
- priority: 6 # Base priority (4-8 range)
- pathPatterns: ["**/*.prisma"] # File glob triggers
- bashPatterns: ["prisma\\s"] # Bash command regex triggers
- importPatterns: ["@prisma/client"] # Import/require triggers
- promptSignals:
- phrases: ["prisma schema"] # +6 each
- allOf: [["database", "orm"]] # +4 per group
- anyOf: ["migration"] # +1 each (cap +2)
- noneOf: ["mongodb"] # Hard exclude
- minScore: 6
- validate:
- - pattern: "executeRaw\\("
- message: "Use $queryRaw for type safety"
- severity: "warn"
- skipIfFileContains: "\\$queryRaw"
----
-# Skill Title
-
-Markdown body injected as additionalContext...
-```
-
----
-
-## Manifest
-
-`generated/skill-manifest.json` is built by `scripts/build-manifest.ts` from all `SKILL.md` frontmatter. It pre-compiles glob patterns to regex at build time so hooks don't parse YAML or convert globs at runtime.
-
-The manifest uses a **version 2 paired-array format**: `pathPatterns[i]` corresponds to `pathRegexSources[i]`, ensuring globs and their compiled regex stay aligned.
-
-Hooks prefer the manifest over scanning `SKILL.md` files directly. Run `bun run build:manifest` to regenerate after changing any skill frontmatter.
-
----
-
-## YAML Parser Semantics
-
-The plugin uses a custom inline `parseSimpleYaml` (in `skill-map-frontmatter.mjs`), **not** the `js-yaml` library. This has intentional behavioral differences:
-
-| Input | js-yaml | vercel-plugin parser | Rationale |
-|-------|---------|---------------------|-----------|
-| Bare `null` | JavaScript `null` | String `"null"` | Skill frontmatter values should always be strings for pattern matching |
-| Bare `true` / `false` | JavaScript boolean | String `"true"` / `"false"` | Same reason — no type coercion |
-| Unclosed `[` | Parse error | Scalar string (no error) | Graceful degradation for malformed arrays |
-| Tab indentation | Allowed | **Explicit error** | Prevents hard-to-debug YAML whitespace issues |
-
-These choices are deliberate. The parser is optimized for the narrow use case of skill frontmatter where all values are ultimately used as string patterns or display text.
-
----
-
-## Template Include Engine
-
-Agents and commands derive their instructions from skills via `.md.tmpl` templates. This keeps skills as the single source of truth — no copy-pasting skill content into agent definitions.
-
-```mermaid
-flowchart LR
- SKILL["skills/nextjs/SKILL.md
(source of truth)"]
- TMPL["agents/ai-architect.md.tmpl
(template with include markers)"]
- BUILD["bun run build:from-skills"]
- OUTPUT["agents/ai-architect.md
(generated, committed)"]
-
- SKILL --> BUILD
- TMPL --> BUILD
- BUILD --> OUTPUT
-```
-
-Two include formats:
-
-```
-{{include:skill:<slug>:<heading>}}            — extracts a section by heading
-{{include:skill:<slug>:frontmatter:<field>}}  — extracts a frontmatter value
-```
-
-**Build**: `bun run build:from-skills` resolves includes and writes output files. 8 templates currently exist across `agents/` and `commands/`.
-
-**Check**: `bun run build:from-skills:check` verifies outputs are up-to-date (exits non-zero on drift).
-
----
-
-## Build Pipeline
-
-```mermaid
-flowchart LR
- SRC["hooks/src/*.mts
(TypeScript source)"]
- HOOKS["hooks/*.mjs
(compiled ESM)"]
- SKILLS["skills/*/SKILL.md
(skill definitions)"]
- MANIFEST["generated/skill-manifest.json"]
- TMPLS["*.md.tmpl
(templates)"]
- AGENTS["agents/*.md + commands/*.md
(generated)"]
-
- SRC -->|"bun run build:hooks
(tsup)"| HOOKS
- SKILLS -->|"bun run build:manifest"| MANIFEST
- SKILLS -->|"bun run build:from-skills"| AGENTS
- TMPLS -->|"bun run build:from-skills"| AGENTS
-```
-
-All three steps are combined in `bun run build`. A pre-commit hook auto-compiles `.mts` files when staged.
-
----
-
-## Environment Variables
-
-| Variable | Default | Source (Writer) | Reader(s) | Description |
-|----------|---------|-----------------|-----------|-------------|
-| `VERCEL_PLUGIN_LOG_LEVEL` | `off` | User / shell | `logger.mts` | Logging verbosity: `off` / `summary` / `debug` / `trace` |
-| `VERCEL_PLUGIN_DEBUG` | — | User / shell | `logger.mts` | Legacy: `1` maps to `debug` level |
-| `VERCEL_PLUGIN_HOOK_DEBUG` | — | User / shell | `logger.mts` | Legacy: `1` maps to `debug` level |
-| `VERCEL_PLUGIN_SEEN_SKILLS` | `""` | `session-start-seen-skills` | `pretooluse-skill-inject`, `user-prompt-submit-skill-inject` | Comma-delimited list of already-injected skills |
-| `VERCEL_PLUGIN_HOOK_DEDUP` | — | User / shell | `pretooluse-skill-inject`, `user-prompt-submit-skill-inject`, `prompt-analysis` | Set to `off` to disable dedup entirely |
-| `VERCEL_PLUGIN_LIKELY_SKILLS` | — | `session-start-profiler` | `pretooluse-skill-inject`, `subagent-start-bootstrap` | Comma-delimited profiler-detected skills (+5 boost) |
-| `VERCEL_PLUGIN_GREENFIELD` | — | `session-start-profiler` | `inject-claude-md` | `true` when profiler detects an empty project |
-| `VERCEL_PLUGIN_SETUP_MODE` | — | `session-start-profiler` | `pretooluse-skill-inject` | `1` when bootstrap hints >= 3 (+50 priority boost) |
-| `VERCEL_PLUGIN_BOOTSTRAP_HINTS` | — | `session-start-profiler` | — | Comma-delimited bootstrap signal names |
-| `VERCEL_PLUGIN_RESOURCE_HINTS` | — | `session-start-profiler` | — | Comma-delimited resource category names |
-| `VERCEL_PLUGIN_AGENT_BROWSER_AVAILABLE` | — | `session-start-profiler` | `pretooluse-skill-inject` | `1` if `agent-browser` CLI is on PATH |
-| `VERCEL_PLUGIN_INJECTION_BUDGET` | `18000` | User / shell | `pretooluse-skill-inject` | PreToolUse byte budget |
-| `VERCEL_PLUGIN_PROMPT_INJECTION_BUDGET` | `8000` | User / shell | `user-prompt-submit-skill-inject` | UserPromptSubmit byte budget |
-| `VERCEL_PLUGIN_REVIEW_THRESHOLD` | `3` | User / shell | `pretooluse-skill-inject` | TSX edits before `react-best-practices` injection |
-| `VERCEL_PLUGIN_TSX_EDIT_COUNT` | `0` | `pretooluse-skill-inject` | `pretooluse-skill-inject` | Current `.tsx` edit count |
-| `VERCEL_PLUGIN_DEV_VERIFY_COUNT` | `0` | `pretooluse-skill-inject` | `pretooluse-skill-inject` | Dev server verification event count |
-| `VERCEL_PLUGIN_DEV_COMMAND` | — | `pretooluse-skill-inject` | `pretooluse-skill-inject` | Detected dev server command |
-| `VERCEL_PLUGIN_VALIDATED_FILES` | — | `posttooluse-validate` | `posttooluse-validate` | Comma-delimited `path:hash` pairs of validated files |
-| `VERCEL_PLUGIN_RECENT_EDITS` | — | `pretooluse-skill-inject` | `posttooluse-verification-observe` | Comma-delimited recent file edit paths |
-| `VERCEL_PLUGIN_AUDIT_LOG_FILE` | — | User / shell | `hook-env` | Audit log file path, or `off` to disable |
-| `VERCEL_PLUGIN_LEXICAL_RESULT_MIN_SCORE` | `5.0` | User / shell | `lexical-index` | Minimum score for lexical fallback results |
-| `CLAUDE_ENV_FILE` | — | Claude Code | All hooks | Path to env file for persisting vars across hook invocations |
-| `CLAUDE_PLUGIN_ROOT` | — | Claude Code | All hooks | Root directory of the plugin installation |
-| `CLAUDE_PROJECT_ROOT` | — | Claude Code | `session-start-profiler` | Root directory of the user's project |
-| `SESSION_ID` | — | Claude Code | Multiple hooks | Fallback session ID from Claude Code |
-
----
-
-## User Stories
-
-### "I'm building a Next.js app with Prisma"
-
-1. **Session starts** — The profiler scans `package.json`, finds `next` and `@prisma/client` -> sets `VERCEL_PLUGIN_LIKELY_SKILLS=nextjs,vercel-storage`.
-2. **Developer opens `schema.prisma`** — PreToolUse matches `**/*.prisma` glob -> injects the `vercel-storage` skill with Prisma best practices.
-3. **Developer edits `app/page.tsx`** — PreToolUse matches `.tsx` path -> TSX edit counter increments. After 3 edits, `react-best-practices` is injected.
-4. **Developer writes to `schema.prisma`** — PostToolUse validate runs rules from the skill, catching unsafe `executeRaw` usage.
-5. **Developer asks "how do I deploy to preview?"** — UserPromptSubmit scores against `promptSignals` and injects the `deployments-cicd` skill.
-
-### "I'm starting a brand new project"
-
-1. **Session starts** — The profiler finds no `package.json`, no config files -> sets `VERCEL_PLUGIN_GREENFIELD=true` and `VERCEL_PLUGIN_SETUP_MODE=1`.
-2. **inject-claude-md** outputs greenfield execution mode instructions: skip planning, choose defaults immediately, start executing.
-3. **Developer asks "bootstrap a Next.js app with auth"** — UserPromptSubmit matches phrases from `bootstrap` and `auth` skills -> both are injected (within the 2-skill / 8KB budget).
-4. **Developer runs `npx create-next-app`** — PreToolUse matches the bash pattern -> injects `nextjs` skill with setup mode boost (+50).
-
-### "I'm debugging a slow API route"
-
-1. **Developer opens `app/api/data/route.ts`** — PreToolUse matches the path -> injects `vercel-functions` skill.
-2. **Developer asks "why is my API slow?"** — UserPromptSubmit matches `observability` skill phrases -> injects it alongside function guidance.
-3. **Developer runs `vercel logs`** — PreToolUse matches the bash pattern -> injects `vercel-cli` skill (if not already seen, per dedup).
-4. **Developer runs `curl localhost:3000/api/data`** — PostToolUse verification observer classifies this as a `clientRequest` boundary and emits a structured log event.
-
-### "Agent spawns a research subagent"
-
-1. **Developer triggers a complex task** — Main agent decides to spawn an `Explore` subagent.
-2. **PreToolUse (Agent matcher)** — `pretooluse-subagent-spawn-observe` records the pending launch with description and prompt text.
-3. **SubagentStart** — `subagent-start-bootstrap` reads the pending launch, runs prompt signal matching against the subagent's description, and injects a 1KB minimal context (Explore budget).
-4. **Subagent completes** — `subagent-stop-sync` writes a JSONL ledger entry with agent metadata and transcript path.
-
----
-
-## Source Code Map
-
-```
-hooks/
-├── hooks.json # Hook registry (lifecycle -> matcher -> command)
-├── src/
-│ ├── hook-env.mts # Shared runtime helpers (env, paths, file I/O)
-│ ├── logger.mts # Structured JSON logging (off/summary/debug/trace)
-│ ├── skill-map-frontmatter.mts # Custom YAML parser + buildSkillMap()
-│ ├── patterns.mts # Glob->regex, ranking, atomic claims, seen-skills
-│ ├── prompt-patterns.mts # Prompt signal compiler + scorer
-│ ├── prompt-analysis.mts # Dry-run analysis reports for prompt matching
-│   ├── vercel-config.mts              # vercel.json key->skill routing (±10 priority)
-│ ├── unified-ranker.mts # Combined ranking across all signal types
-│ ├── lexical-index.mts # Lexical fallback scoring for unmatched prompts
-│ ├── stemmer.mts # Word stemming for lexical matching
-│ ├── shared-contractions.mts # Contraction expansion for text normalization
-│ ├── subagent-state.mts # Subagent pending launch state management
-│ ├── session-start-seen-skills.mts # Hook: initialize dedup env var
-│ ├── session-start-profiler.mts # Hook: profile project -> set LIKELY_SKILLS
-│ ├── inject-claude-md.mts # Hook: inject vercel.md ecosystem graph
-│ ├── pretooluse-skill-inject.mts # Hook: main injection engine
-│ ├── pretooluse-subagent-spawn-observe.mts # Hook: record pending subagent launches
-│ ├── user-prompt-submit-skill-inject.mts # Hook: prompt signal scoring + injection
-│ ├── posttooluse-validate.mts # Hook: skill validation rules
-│ ├── posttooluse-verification-observe.mts # Hook: verification boundary observer
-│ ├── subagent-start-bootstrap.mts # Hook: bootstrap subagent context
-│ ├── subagent-stop-sync.mts # Hook: write ledger, sync dedup
-│ └── session-end-cleanup.mts # Hook: delete temp files
-├── posttooluse-shadcn-font-fix.mjs # Standalone hook (no .mts source)
-├── *.mjs # Compiled output (committed, ESM)
-
-skills/
-├── <slug>/SKILL.md                    # 46 skill definitions with YAML frontmatter
-
-generated/
-├── skill-manifest.json # Pre-compiled manifest (globs -> regex)
-├── build-from-skills.manifest.json # Template include build manifest
-
-scripts/
-├── build-manifest.ts # Manifest builder
-├── build-from-skills.ts # Template include engine
-
-src/cli/
-├── explain.ts # `vercel-plugin explain` command
-├── doctor.ts # `vercel-plugin doctor` command
-```
-
----
-
-## CLI Tools
-
-### `vercel-plugin explain <target>`
-
-Shows which skills match a file path or bash command, with priority breakdown and budget simulation.
-
-```bash
-# Explain what fires for a file
-vercel-plugin explain app/api/auth/route.ts
-
-# Explain what fires for a bash command
-vercel-plugin explain "vercel deploy --prod"
-
-# JSON output with budget simulation
-vercel-plugin explain app/page.tsx --json --budget 8000
-```
-
-### `vercel-plugin doctor`
-
-Self-diagnosis: validates manifest parity, checks hook timeout risk, tests dedup correctness, and reports skill map errors.
-
-```bash
-vercel-plugin doctor
-```
diff --git a/docs/cli-reference.md b/docs/cli-reference.md
deleted file mode 100644
index abaf248..0000000
--- a/docs/cli-reference.md
+++ /dev/null
@@ -1,343 +0,0 @@
-# CLI Reference
-
-The vercel-plugin CLI provides two commands for debugging and validating the skill injection system: `explain` and `doctor`.
-
-**Entry point:** `src/cli/index.ts`
-
-```bash
-vercel-plugin <command> [options]
-```
-
----
-
-## Table of Contents
-
-- [`explain` — Skill matching debugger](#explain)
-- [`doctor` — Self-diagnosis](#doctor)
-- [Exit codes](#exit-codes)
-- [Examples & user stories](#examples--user-stories)
-
----
-
-## `explain`
-
-Shows which skills match a given file path or bash command, with priority breakdown, budget simulation, and collision detection. Mirrors the runtime selection pipeline used by `pretooluse-skill-inject.mjs`.
-
-### Usage
-
-```bash
-vercel-plugin explain <target> [options]
-```
-
-The `<target>` is a file path or bash command string. The CLI auto-detects the target type based on heuristics:
-
-- Contains spaces + starts with a known CLI tool (`vercel`, `npm`, `bun`, etc.) → **bash**
-- Contains flag-like patterns (`--flag`) → **bash**
-- Otherwise → **file path**
-
-### Flags
-
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `--json` | boolean | `false` | Machine-readable JSON output (full `ExplainResult` structure) |
-| `--project <path>` | string | auto-detected | Plugin root directory. Must contain a `skills/` directory |
-| `--likely-skills <slugs>` | string | — | Comma-delimited skill slugs to simulate profiler boost (+5 priority each) |
-| `--budget <bytes>` | number | `12000` | Override injection byte budget for simulation |
-
-### Pipeline simulation
-
-The explain command replicates the runtime injection pipeline:
-
-```mermaid
-graph TD
- TARGET["Target input
(file path or bash command)"]
- TARGET --> DETECT["Auto-detect target type
(file vs bash)"]
- DETECT --> COMPILE["Compile skill patterns
(from manifest or live scan)"]
- COMPILE --> MATCH["Match target against
path/bash/import patterns"]
- MATCH --> VCONFIG["Apply vercel.json routing
(±10 priority)"]
- VCONFIG --> PROFILER["Apply profiler boost
(+5 for --likely-skills)"]
- PROFILER --> RANK["Rank by effective priority
(DESC, then alpha ASC)"]
- RANK --> BUDGET["Simulate byte budget
(MAX_SKILLS=3, default 12KB)"]
- BUDGET --> OUTPUT["Output: matches, collisions,
injection modes, budget usage"]
-```
-
-### Injection modes
-
-Each matched skill is assigned an injection mode:
-
-| Mode | Meaning |
-|------|---------|
-| `full` | Full SKILL.md body injected within budget |
-| `summary` | Body exceeded budget; summary field used instead |
-| `droppedByCap` | Exceeded MAX_SKILLS hard cap (3 skills) |
-| `droppedByBudget` | Neither body nor summary fit within remaining budget |
-
-The first matched skill always gets `full` injection regardless of size. Subsequent skills must fit within the remaining budget.
-
-### Match types
-
-| Match type | Description |
-|------------|-------------|
-| `file:full` | Full glob pattern match against file path |
-| `file:basename` | Basename-only match |
-| `file:suffix` | File extension/suffix match |
-| `file:import` | Import/require pattern found in file content |
-| `bash:full` | Regex match against bash command string |
-
-### Human-readable output
-
-```
-Target: middleware.ts (file)
-Skills in manifest: 43
-Budget: 8234 / 12000 bytes
-
-Matched: 3 skill(s)
-Injected: 2 | Summary-only: 1
-
- [INJECT] routing-middleware (4521 bytes)
- priority: 8
- pattern: middleware.{ts,js} (full)
- reason: injected #1 (4521B, total 4521B / 12000B)
- [INJECT] nextjs (3713 bytes)
- priority: 6
- pattern: **/*.{ts,tsx} (suffix)
- reason: injected #2 (3713B, total 8234B / 12000B)
-```
-
-### JSON output
-
-With `--json`, the full `ExplainResult` object is emitted:
-
-```typescript
-interface ExplainResult {
- target: string; // Input target
- targetType: "file" | "bash"; // Detected type
- toolName?: string; // Explicit tool override
- matches: ExplainMatch[]; // All matched skills with injection details
- collisions: ExplainCollision[]; // Skills sharing the same effective priority
- injectedCount: number; // Skills that will be injected (full + summary)
- cappedCount: number; // Skills dropped by cap or budget
- droppedByBudgetCount: number;
- summaryOnlyCount: number;
- skillCount: number; // Total skills in manifest
- budgetBytes: number; // Budget used for simulation
- usedBytes: number; // Actual bytes consumed
- buildWarnings: string[]; // Warnings from SKILL.md parsing
-}
-```
-
-### Collision detection
-
-When multiple skills share the same effective priority, `explain` reports a collision. At runtime, ties are broken alphabetically — the collision warning helps skill authors adjust priorities to get deterministic ordering.
-
-```
-Collisions:
- - vercel-functions, routing-middleware: 2 skills share effective priority 5; tie-broken alphabetically
-```
-
----
-
-## `doctor`
-
-Self-diagnosis command that validates the plugin setup. Checks manifest consistency, hook configuration, dedup state, skill validity, template freshness, and subagent hook registration.
-
-### Usage
-
-```bash
-vercel-plugin doctor [options]
-```
-
-### Flags
-
-| Flag | Type | Default | Description |
-|------|------|---------|-------------|
-| `--json` | boolean | `false` | Machine-readable JSON output (full `DoctorResult` structure) |
-| `--project <path>` | string | auto-detected | Plugin root directory |
-
-### Checks performed
-
-#### 1. `skill-validation` — Skill map validity
-
-Loads all `skills/*/SKILL.md` files and validates:
-- Valid YAML frontmatter is present
-- Required fields exist (name, description, summary, metadata)
-- Pattern arrays contain valid entries
-
-Reports both errors (invalid skills) and warnings (non-critical issues).
-
-#### 2. `manifest-parity` — Manifest vs live scan consistency
-
-Compares `generated/skill-manifest.json` against a live scan of all `SKILL.md` files:
-
-| Sub-check | Severity | Condition |
-|-----------|----------|-----------|
-| Missing manifest file | warning | `generated/skill-manifest.json` does not exist |
-| Parse failure | error | Manifest JSON is malformed |
-| Skills in live but not manifest | error | New skills added without rebuilding |
-| Skills in manifest but not live | error | Skills deleted without rebuilding |
-| Priority drift | error | Priority value differs between live and manifest |
-| Pattern drift | error | `pathPatterns` or `bashPatterns` differ |
-
-**Fix:** `bun run build:manifest`
-
-#### 3. `hook-timeout` — Performance risk assessment
-
-Warns when the number of skills or patterns approaches levels that could cause the 5-second hook timeout:
-
-| Threshold | Severity | Trigger |
-|-----------|----------|---------|
-| 50+ skills | warning | `liveSkillCount > 50` |
-| 200+ total patterns | warning | `totalPatterns > 200` |
-
-**Mitigation:** Use the pre-built manifest, consolidate low-priority skills, increase pattern specificity.
-
-#### 4. `dedup` — Deduplication state
-
-Validates the `VERCEL_PLUGIN_SEEN_SKILLS` environment variable and dedup strategy:
-
-| Condition | Severity | Message |
-|-----------|----------|---------|
-| `VERCEL_PLUGIN_HOOK_DEDUP=off` | warning | Dedup is explicitly disabled |
-| Invalid format | error | Expected empty or comma-delimited slugs |
-| Env var not set | warning | Dedup limited to single invocation |
-
-#### 5. `template-staleness` — Generated file freshness
-
-Checks whether `.md.tmpl` templates or `SKILL.md` sources are newer than their generated `.md` outputs:
-
-| Condition | Severity |
-|-----------|----------|
-| Template has no generated output | error |
-| Template is newer than output | error |
-| SKILL.md is newer than output | warning |
-
-**Fix:** `bun run build:from-skills`
-
-#### 6. `subagent-hooks` — Subagent hook registration
-
-Validates that `hooks/hooks.json` has proper `SubagentStart` and `SubagentStop` entries:
-
-| Check | Severity | Description |
-|-------|----------|-------------|
-| Missing hook entry | error | Required event not registered |
-| Timeout too high | warning | Exceeds recommended 5-second max |
-| No matcher | warning | Hook won't match any agent types |
-| Uncovered agent types | warning | Expected types (Explore, Plan, general-purpose) not covered by matchers |
-
-### Output format
-
-```
-vercel-plugin doctor
-====================
-
-Skills (live scan): 43
-Skills (manifest): 43
-Total patterns: 127
-Dedup strategy: env-var
-
-All checks passed.
-
-Result: 0 error(s), 0 warning(s)
-```
-
-When issues are found:
-
-```
-vercel-plugin doctor
-====================
-
-Skills (live scan): 43
-Skills (manifest): 42
-Total patterns: 127
-Dedup strategy: env-var
-
-Errors (1):
- [manifest-parity] Skills in live scan but missing from manifest: vercel-queues
- -> Run `bun run build:manifest` to regenerate
-
-Warnings (1):
- [template-staleness] A SKILL.md was modified after commands/deploy.md was last generated
- -> Run `bun run build:from-skills` to regenerate (skill content may have changed)
-
-Result: 1 error(s), 1 warning(s)
-```
-
-### JSON output
-
-With `--json`, the full `DoctorResult` object is emitted:
-
-```typescript
-interface DoctorResult {
- issues: DoctorIssue[];
- summary: {
- manifestSkillCount: number | null;
- liveSkillCount: number;
- totalPatterns: number;
- dedupStrategy: string; // "env-var" | "disabled" | "memory-only"
- };
-}
-
-interface DoctorIssue {
- severity: "error" | "warning";
- check: string; // e.g., "manifest-parity", "hook-timeout"
- message: string;
- hint?: string; // Suggested fix
-}
-```
-
----
-
-## Exit codes
-
-| Code | Meaning |
-|------|---------|
-| `0` | Success (explain: results returned; doctor: all checks pass) |
-| `1` | Issues found (doctor: at least one error-severity issue) |
-| `2` | Unexpected failure (missing arguments, parse errors, no `skills/` directory) |
-
----
-
-## Examples & user stories
-
-### Debugging why a skill isn't injected
-
-```bash
-# See all skills that match your API route file
-vercel-plugin explain app/api/chat/route.ts
-
-# Simulate what happens when the profiler detects ai-gateway
-vercel-plugin explain app/api/chat/route.ts --likely-skills ai-gateway
-
-# Check if budget is the bottleneck
-vercel-plugin explain app/api/chat/route.ts --budget 30000
-```
-
-### Validating after adding a new skill
-
-```bash
-# 1. Build the manifest
-bun run build:manifest
-
-# 2. Verify the new skill matches expected files
-vercel-plugin explain "the/file/it/should/match.ts"
-
-# 3. Run full diagnostics
-vercel-plugin doctor
-```
-
-### CI integration
-
-```bash
-# In your CI pipeline, verify plugin health
-vercel-plugin doctor --json | jq '.issues | length'
-
-# Check template freshness (non-zero exit on drift)
-bun run build:from-skills:check
-```
-
-### Comparing priorities across tools
-
-```bash
-# JSON output for scripting — pipe to jq
-vercel-plugin explain vercel.json --json | jq '.matches[] | {skill, effectivePriority, injectionMode}'
-```
diff --git a/docs/developer-guide.md b/docs/developer-guide.md
deleted file mode 100644
index e3cf265..0000000
--- a/docs/developer-guide.md
+++ /dev/null
@@ -1,566 +0,0 @@
-# Developer Workflows & CLI Reference
-
-This guide covers every build command, CLI tool, testing workflow, and development process in vercel-plugin.
-
----
-
-## Table of Contents
-
-- [Build Pipeline](#build-pipeline)
-- [Build Commands](#build-commands)
-- [Template Include Engine](#template-include-engine)
-- [Testing Architecture](#testing-architecture)
-- [Pre-Commit Hook](#pre-commit-hook)
-- [Playground System](#playground-system)
-- [Environment Variables](#environment-variables)
-- [Troubleshooting](#troubleshooting)
-
----
-
-## Build Pipeline
-
-The project has three independent build stages that combine into a single `bun run build`:
-
-```mermaid
-graph TD
- subgraph "Stage 1 — Hook Compilation"
- MTS["hooks/src/*.mts
(TypeScript sources)"]
- MTS -->|"tsup
hooks/tsup.config.ts"| MJS["hooks/*.mjs
(ESM, committed)"]
- end
-
- subgraph "Stage 2 — Manifest Generation"
- SKILL["skills/*/SKILL.md
(43 skills, YAML frontmatter)"]
- SKILL -->|"build-manifest.ts
glob→regex pre-compile"| MANIFEST["generated/skill-manifest.json
(version 2, paired arrays)"]
- end
-
- subgraph "Stage 3 — Template Compilation"
- TMPL["*.md.tmpl
(8 template files)"]
- SKILL2["skills/*/SKILL.md"] -->|"heading & frontmatter
extraction"| RESOLVER["build-from-skills.ts
(include resolver)"]
- TMPL --> RESOLVER
- RESOLVER -->|"marker replacement"| MD["*.md
(agents + commands)"]
- RESOLVER -->|"dependency tracking"| BMANIFEST["generated/
build-from-skills.manifest.json"]
- end
-
- MJS --> RUNTIME["Runtime: Claude Code
hook execution"]
- MANIFEST --> RUNTIME
- MD --> OUTPUT["agents/*.md
commands/*.md"]
-```
-
-### Data flow summary
-
-1. **TypeScript hooks** (`hooks/src/*.mts`) compile via tsup to ESM modules (`hooks/*.mjs`). Target: `node20`, no bundling, no sourcemaps. The compiled `.mjs` files are committed to the repo so the Claude Agent SDK can execute them directly.
-2. **Skill frontmatter** from 43 `SKILL.md` files gets pre-compiled into `generated/skill-manifest.json` with glob-to-regex conversion for fast runtime matching. The manifest uses a version 2 format with paired arrays (`pathPatterns` ↔ `pathRegexSources`, `bashPatterns` ↔ `bashRegexSources`).
-3. **Templates** (`*.md.tmpl`) pull sections from skills via `{{include:skill:...}}` markers, producing committed `.md` files for agents and commands. A build manifest (`generated/build-from-skills.manifest.json`) tracks dependencies for staleness detection.
-
-### Stage execution order
-
-```
-bun run build
- ├── bun run build:hooks # Stage 1: .mts → .mjs
- ├── bun run build:manifest # Stage 2: SKILL.md → manifest
- └── bun run build:from-skills # Stage 3: .md.tmpl → .md
-```
-
-All three stages are independent and can run in any order, but `build` runs them sequentially for simplicity.
-
----
-
-## Build Commands
-
-### `bun run build:hooks`
-
-Compiles all TypeScript hook sources to ESM.
-
-| Detail | Value |
-|--------|-------|
-| Source | `hooks/src/*.mts` |
-| Output | `hooks/*.mjs` |
-| Tool | tsup with `hooks/tsup.config.ts` |
-| Target | `node20`, no bundling, no sourcemaps |
-
-Run this after editing any `.mts` file. The pre-commit hook runs it automatically when `.mts` files are staged.
-
-### `bun run build:manifest`
-
-Generates the skill manifest from SKILL.md frontmatter.
-
-| Detail | Value |
-|--------|-------|
-| Script | `scripts/build-manifest.ts` |
-| Input | `skills/*/SKILL.md` (43 skills) |
-| Output | `generated/skill-manifest.json` |
-
-The manifest pre-compiles glob patterns to regex at build time so runtime hooks avoid expensive parsing. Version 2 format with paired arrays (`pathPatterns` ↔ `pathRegexSources`).
-
-### `bun run build:from-skills`
-
-Resolves template includes and generates output files.
-
-| Detail | Value |
-|--------|-------|
-| Script | `scripts/build-from-skills.ts` |
-| Templates | 8 files in `agents/` and `commands/` |
-| Output | Corresponding `.md` files + `generated/build-from-skills.manifest.json` |
-
-See [Template Include Engine](#template-include-engine) below for full details.
-
-### `bun run build:from-skills:check`
-
-Verifies generated `.md` files are up-to-date without writing. Exits non-zero on drift. Useful in CI.
-
-### `bun run build`
-
-Runs all three stages sequentially:
-
-```
-bun run build:hooks && bun run build:manifest && bun run build:from-skills
-```
-
-### `bun run typecheck`
-
-Runs TypeScript type checking on hook sources without emitting files:
-
-```
-tsc -p hooks/tsconfig.json --noEmit
-```
-
-### `bun run validate`
-
-Structural validation of all skills and the manifest. Runs `scripts/validate.ts` to check:
-- Every skill has valid YAML frontmatter
-- Required fields are present (name, description, summary, metadata)
-- Pattern arrays contain valid entries
-- Manifest is consistent with live skill data
-
-### `bun run doctor`
-
-Runs `vercel-plugin doctor` (see [docs/cli-reference.md](cli-reference.md) for full details). Self-diagnosis for the plugin setup.
-
----
-
-## Template Include Engine
-
-The template engine (`scripts/build-from-skills.ts`) resolves skill content into agent and command definitions at build time. Skills are the **single source of truth** — templates pull content so agents/commands stay in sync without duplicating prose.
-
-### Marker formats
-
-Two include marker syntaxes are supported:
-
-#### 1. Section extraction
-
-```
-{{include:skill:<slug>:<heading>}}
-```
-
-Extracts a markdown section by heading from `skills/<slug>/SKILL.md`. The extraction algorithm:
-
-1. Finds the first heading whose text matches `<heading>` (case-insensitive, optional leading `#` characters)
-2. Captures all content from that heading to the next heading of equal or higher level
-3. Skips heading detection inside fenced code blocks (``` markers)
-
-**Nested headings** use `>` as a path separator:
-
-```
-{{include:skill:nextjs:Rendering Strategy Decision > Caching Strategy Matrix}}
-```
-
-This first extracts the "Rendering Strategy Decision" section, then extracts "Caching Strategy Matrix" within that narrowed scope.
-
-#### 2. Frontmatter field extraction
-
-```
-{{include:skill:<slug>:frontmatter:<field>}}
-```
-
-Extracts a YAML frontmatter field value from `skills/<slug>/SKILL.md`. Supports dotted paths for nested fields (e.g., `frontmatter:metadata.priority`). Returns strings/numbers directly; arrays/objects are JSON-serialized.
-
-### CLI options for `build-from-skills`
-
-| Flag | Description |
-|------|-------------|
-| `--check` | Verify outputs are up-to-date without writing (exit 1 on drift) |
-| `--dry-run` | Print resolved output to stdout without writing |
-| `--json` | Structured JSON output with per-template diagnostics |
-| `--audit` | Coverage report showing what percentage of each template comes from includes |
-| `--skill <name>` | Reverse-dependency query: which templates depend on a given skill |
-
-### All 8 templates and their source skills
-
-#### Agent templates (`agents/*.md.tmpl`)
-
-| Template | Output | Source skills | Includes |
-|----------|--------|---------------|----------|
-| `ai-architect.md.tmpl` | `ai-architect.md` | `ai-sdk` | Core Functions > Agents, Migration from AI SDK 5 |
-| `deployment-expert.md.tmpl` | `deployment-expert.md` | `vercel-functions`, `deployments-cicd` | Function Runtime Diagnostics (5 subsections), Deployment Strategy Matrix, Common Build Errors, CI/CD Integration, Promote & Rollback |
-| `performance-optimizer.md.tmpl` | `performance-optimizer.md` | `nextjs`, `observability` | Rendering Strategy Decision (6 subsections), Bundle Analyzer, Cache Components, Speed Insights Metrics, Performance Audit Checklist |
-
-#### Command templates (`commands/*.md.tmpl`)
-
-| Template | Output | Source skills | Includes |
-|----------|--------|---------------|----------|
-| `bootstrap.md.tmpl` | `bootstrap.md` | `bootstrap` | Preflight, Rules, Resource Setup, AUTH_SECRET, Env Verification, App Setup, Bootstrap Verification, Summary Format, Next Steps |
-| `deploy.md.tmpl` | `deploy.md` | `observability`, `deployments-cicd` | Deploy Preflight, Post-Deploy Error Scan, Preview/Production Deployment, Inspect, Summary, Next Steps |
-| `env.md.tmpl` | `env.md` | `env-vars` | vercel env CLI (List/Pull/Add/Remove), Environment-Specific Config, Gotchas |
-| `marketplace.md.tmpl` | `marketplace.md` | `marketplace` | Observability Integration Path |
-| `status.md.tmpl` | `status.md` | `observability` | Drains Security/Signature, Fallback Guidance, Decision Matrix |
-
-### User story: editing a skill updates downstream templates
-
-```
-Developer edits skills/observability/SKILL.md
- → Pre-commit hook detects staged SKILL.md
- → Runs `bun run build:from-skills:check`
- → Detects drift in commands/deploy.md and commands/status.md
- → Auto-regenerates with `bun run build:from-skills`
- → Stages updated .md files
- → Exits with code 1 ("review and re-commit")
- → Developer reviews diff, runs `git commit` again
-```
-
-### Diagnostic codes
-
-The template engine emits structured diagnostics when includes fail:
-
-| Code | Meaning |
-|------|---------|
-| `SKILL_NOT_FOUND` | `skills/<skill>/SKILL.md` does not exist |
-| `HEADING_NOT_FOUND` | Heading text not found in the skill body |
-| `FRONTMATTER_NOT_FOUND` | YAML field not present in frontmatter |
-| `STALE_OUTPUT` | Output file is out of date (--check mode) |
-
----
-
-## Testing Architecture
-
-### Running tests
-
-```bash
-bun test # Typecheck + all test files
-bun test tests/<name>.test.ts # Single test file
-bun run test:update-snapshots # Regenerate golden snapshots
-```
-
-`bun test` runs typecheck first (`tsc -p hooks/tsconfig.json --noEmit`), then all test files.
-
-### Test categories
-
-The test suite is organized into functional categories:
-
-```mermaid
-graph LR
- subgraph "Hook Integration"
- A1["pretooluse-skill-inject"]
- A2["user-prompt-submit"]
- A3["posttooluse-validate"]
- A4["session-start-profiler"]
- A5["session-start-seen-skills"]
- A6["session-end-cleanup"]
- end
-
- subgraph "Subagent Lifecycle"
- B1["subagent-fresh-env"]
- B2["subagent-lifecycle-integration"]
- B3["subagent-start-context"]
- B4["subagent-start-bootstrap-routing"]
- B5["subagent-scope-dedup"]
- B6["subagent-state"]
- B7["session-timeline-subagent"]
- end
-
- subgraph "Pattern Matching"
- C1["patterns"]
- C2["fuzz-glob"]
- C3["fuzz-yaml"]
- C4["prompt-signals"]
- C5["prompt-analysis"]
- C6["prompt-patterns-lexical"]
- C7["unified-ranker"]
- end
-
- subgraph "Snapshots & Golden Files"
- D1["snapshot-runner"]
- D2["snapshots"]
- end
-
- subgraph "Validation & Skill Map"
- E1["validate"]
- E2["validate-rules"]
- E3["build-skill-map"]
- E4["skill-map-frontmatter"]
- E5["external-skill-resolution"]
- end
-
- subgraph "Build & Templates"
- F1["build-from-skills"]
- F2["build-from-skills-integration"]
- F3["build-from-skills-workflow"]
- end
-
- subgraph "CLI & Infrastructure"
- G1["cli-explain"]
- G2["hooks-json-structural"]
- G3["doctor-subagent-hooks"]
- G4["hook-sync"]
- G5["logger"]
- G6["vercel-config"]
- end
-
- subgraph "Scenario & Specialized"
- H1["notion-clone-patterns"]
- H2["slack-clone-patterns"]
- H3["tsx-review-trigger"]
- H4["dev-server-verify"]
- H5["verification-skill"]
- H6["verification-logging"]
- H7["verification-intent-routing"]
- H8["redaction"]
- H9["stemmer"]
- H10["lexical-index"]
- end
-
- subgraph "Benchmark"
- I1["benchmark-pipeline"]
- I2["benchmark-analyze"]
- end
-```
-
-#### Hook integration tests
-
-End-to-end tests for each hook entry point. They simulate Claude Agent SDK hook invocations with realistic tool input and verify the correct skills are injected, dedup state is maintained, and output conforms to `SyncHookJSONOutput`.
-
-| Test file | Hook under test | Key assertions |
-|-----------|----------------|----------------|
-| `pretooluse-skill-inject` | PreToolUse | Path/bash/import matching, priority ranking, budget enforcement, dedup |
-| `user-prompt-submit` | UserPromptSubmit | Prompt signal scoring (phrases/allOf/anyOf/noneOf), 2-skill cap, 8KB budget |
-| `posttooluse-validate` | PostToolUse | Validation rule matching, severity levels, `skipIfFileContains` |
-| `session-start-profiler` | SessionStart | Config file scanning, dependency detection, greenfield mode |
-| `session-start-seen-skills` | SessionStart | Env var initialization, claim dir creation |
-| `session-end-cleanup` | SessionEnd | Temp file deletion, claim dir cleanup |
-
-#### Pattern matching tests
-
-Unit tests for the matching and compilation layer. Cover glob-to-regex conversion, bash command regex, import pattern detection, YAML parsing edge cases, and prompt signal scoring.
-
-#### Snapshot tests
-
-Golden-file regression tests. `snapshot-runner` generates skill injection metadata for each `vercel.json` fixture and compares against committed baselines. Update with `bun run test:update-snapshots`.
-
-#### Validation tests
-
-Test the YAML frontmatter parser, skill map construction, structural validation rules, and external skill resolution. Exercises the custom `parseSimpleYaml` parser's intentional differences from `js-yaml` (bare `null` → string `"null"`, bare booleans → strings, unclosed `[` → scalar).
-
-#### Build & template tests
-
-Test the template include engine: marker regex matching, section extraction with nested headings, frontmatter field resolution, code block fence skipping, and full compilation pipeline.
-
-#### Benchmark tests
-
-Performance regression tests for the injection pipeline. `benchmark-pipeline` measures pattern compilation and matching latency; `benchmark-analyze` validates that results stay within acceptable bounds.
-
-#### CLI tests
-
-Tests for `vercel-plugin explain` covering target type detection (file vs bash), pattern matching output, priority calculations with profiler/vercel.json boosts, budget simulation, and collision detection.
-
-#### Scenario tests
-
-Real-world regression tests that simulate specific project types (Notion clone, Slack clone) to verify correct skill injection for realistic file and dependency combinations.
-
----
-
-## Pre-Commit Hook
-
-The `.git/hooks/pre-commit` script automates two tasks:
-
-### 1. Hook compilation
-
-When any `hooks/src/*.mts` file is staged:
-
-```
-1. Typecheck: bun run typecheck
-2. Compile: bun run build:hooks
-3. Stage: git add hooks/*.mjs
-```
-
-### 2. Template freshness
-
-When any `.md.tmpl`, `SKILL.md`, `build-from-skills.ts`, or `skill-map-frontmatter.mts` is staged:
-
-```
-1. Check: bun run build:from-skills:check
-2. If stale: bun run build:from-skills
-3. Stage: git add agents/*.md commands/*.md
-4. Exit 1: "Generated files updated and staged. Please review and re-commit."
-```
-
-The hook exits with code 1 after regeneration so you can review the changes before committing. Simply run `git commit` again after reviewing.
-
----
-
-## Playground System
-
-The playground generates static skill files for external AI coding tools. Lives in `.playground/`.
-
-### Structure
-
-```
-.playground/
-├── generate-all.ts # Unified CLI entry point
-├── _shared/
-│ ├── emitter.ts # Context creation + skill flattening
-│ ├── plugin-discovery.ts # Discovers skills from plugin root
-│ ├── skill-discovery.ts # Skill data extraction
-│ ├── types.ts # Shared types (DiscoveredSkill, PluginManifest, etc.)
-│ └── marker-patch.ts # {{include:…}} marker resolution for external tools
-├── codex-cli/generate.ts # → .codex/ directory structure
-├── cursor/generate.ts # → .cursor/rules/
-├── vscode-copilot/generate.ts # → .github/copilot-instructions.md
-├── opencode/generate.ts # → .opencode/
-├── gemini-cli/generate.ts # → .gemini/commands/
-├── gemini-code-assist/generate.ts # → .gemini/skills/
-├── _fixtures/ # Test plugins (full, minimal, collision, oversized, etc.)
-└── _snapshots/ # Golden output snapshots
-```
-
-### Running the generator
-
-```bash
-bun run playground:generate
-```
-
-**Options:**
-
-| Flag | Description |
-|------|-------------|
-| `--plugins <dir>` | Plugin root to discover skills from (default: `.playground/_fixtures`) |
-| `--out <dir>` | Output directory (default: `.playground/_output`) |
-| `--dry-run` | Preview without writing files |
-| `--target <names>` | Comma-separated generator names (e.g., `cursor,codex-cli`) |
-
-**Supported generators:** `codex-cli`, `cursor`, `vscode-copilot`, `opencode`, `gemini-cli`, `gemini-code-assist`
-
-### Workflow
-
-```mermaid
-sequenceDiagram
- participant Dev as Developer
- participant Gen as generate-all.ts
- participant Disc as plugin-discovery.ts
- participant Emit as Per-tool generator
-
- Dev->>Gen: bun run playground:generate
- Gen->>Disc: Discover plugins (skills, commands, agents, hooks)
- Disc-->>Gen: DiscoveredPlugin[]
- Gen->>Emit: Spawn generators in parallel (one per target tool)
- Emit-->>Gen: Generated files + stats
- Gen-->>Dev: JSON report (file counts, durations, skill counts)
-```
-
-Output is a JSON report with file counts, durations, and per-generator statistics.
-
----
-
-## Environment Variables
-
-These variables control runtime behavior. Set them before running Claude Code or in tests.
-
-| Variable | Default | Description |
-|----------|---------|-------------|
-| `VERCEL_PLUGIN_LOG_LEVEL` | `off` | Logging verbosity: `off`, `summary`, `debug`, `trace` |
-| `VERCEL_PLUGIN_DEBUG` | — | Legacy: `1` maps to `debug` level |
-| `VERCEL_PLUGIN_SEEN_SKILLS` | `""` | Comma-delimited already-injected skill slugs |
-| `VERCEL_PLUGIN_HOOK_DEDUP` | — | Set to `off` to disable deduplication |
-| `VERCEL_PLUGIN_LIKELY_SKILLS` | — | Profiler-detected skills (comma-delimited, +5 boost) |
-| `VERCEL_PLUGIN_GREENFIELD` | — | `true` when project is empty (set by profiler) |
-| `VERCEL_PLUGIN_INJECTION_BUDGET` | `18000` | PreToolUse byte budget |
-| `VERCEL_PLUGIN_PROMPT_INJECTION_BUDGET` | `8000` | UserPromptSubmit byte budget |
-| `VERCEL_PLUGIN_REVIEW_THRESHOLD` | `3` | TSX edits before `react-best-practices` injection |
-| `VERCEL_PLUGIN_TSX_EDIT_COUNT` | `0` | Current `.tsx` edit count |
-| `VERCEL_PLUGIN_AUDIT_LOG_FILE` | — | Audit log path or `off` |
-
----
-
-## Troubleshooting
-
-### "Generated files are out of date"
-
-The pre-commit hook or CI detected drift between `.md.tmpl` templates and their `.md` outputs.
-
-**Fix:**
-
-```bash
-bun run build:from-skills
-git add agents/*.md commands/*.md
-```
-
-### Manifest parity errors from `doctor`
-
-The `generated/skill-manifest.json` is out of sync with live `SKILL.md` files.
-
-**Fix:**
-
-```bash
-bun run build:manifest
-```
-
-### Typecheck failures
-
-Hook source uses TypeScript features that need compilation. The `tsc` target is `hooks/tsconfig.json`.
-
-**Fix:**
-
-```bash
-bun run typecheck # See errors
-# Fix the .mts files, then:
-bun run build:hooks
-```
-
-### Hook timeout (5-second limit)
-
-All PreToolUse, UserPromptSubmit, PostToolUse, SubagentStart, and SubagentStop hooks have a 5-second timeout. If you add many skills or patterns, `doctor` will warn you.
-
-**Diagnose:**
-
-```bash
-bun run doctor
-```
-
-**Mitigations:**
-- Use the pre-built manifest (`build:manifest`) to avoid live YAML scanning
-- Consolidate low-priority skills
-- Increase pattern specificity to reduce false-positive matching
-
-### Dedup not working (skills injected twice)
-
-**Check:** Is `session-start-seen-skills.mjs` running on SessionStart? Run `doctor` to verify.
-
-**Debug:** Set `VERCEL_PLUGIN_LOG_LEVEL=debug` to see dedup strategy selection and claim attempts in stderr.
-
-### Pre-commit hook not running
-
-Verify the hook exists and is executable:
-
-```bash
-ls -la .git/hooks/pre-commit
-chmod +x .git/hooks/pre-commit
-```
-
-### Playground generator fails
-
-Ensure the plugin root has a `skills/` directory with valid SKILL.md files:
-
-```bash
-bun run playground:generate --dry-run
-```
-
-The `--dry-run` flag previews without writing, showing discovery errors on stderr.
-
-### Tests fail after adding a new skill
-
-After adding a new `skills/<skill>/SKILL.md`:
-
-```bash
-bun run build:manifest # Update manifest
-bun run build:from-skills # Update templates (if referenced)
-bun run test:update-snapshots # Update golden snapshots
-bun test # Verify everything passes
-```
diff --git a/docs/glossary.md b/docs/glossary.md
deleted file mode 100644
index 7a8de16..0000000
--- a/docs/glossary.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# Glossary
-
-Definitions of project-specific terms used throughout the vercel-plugin codebase and documentation.
-
----
-
-| Term | Definition |
-|------|-----------|
-| **additionalContext** | The field in a Claude Code hook's JSON output (`SyncHookJSONOutput`) used to inject skill content into Claude's context window. Each hook invocation can return one or more `additionalContext` entries. |
-| **allOf** | A prompt signal group where **all** terms must appear in the user's prompt for the group to score. Each matching `allOf` group contributes **+4** to the skill's prompt score. Defined in `metadata.promptSignals.allOf`. |
-| **anyOf** | A prompt signal list where **any** matching term adds **+1**, capped at **+2** total. Used for broad topic hints that shouldn't dominate scoring. Defined in `metadata.promptSignals.anyOf`. |
-| **Atomic claim** | A zero-byte file created with `openSync(path, "wx")` (`O_EXCL` flag) in the claim directory. The OS guarantees only one process succeeds, providing exactly-once injection semantics even under concurrent hook invocations. See **Claim directory**. |
-| **Budget** | The maximum byte size of skill content injectable per hook invocation. PreToolUse: **3 skills / 18 KB** (`VERCEL_PLUGIN_INJECTION_BUDGET`). UserPromptSubmit: **2 skills / 8 KB** (`VERCEL_PLUGIN_PROMPT_INJECTION_BUDGET`). When a skill body exceeds remaining budget, its `summary` field is injected as a compact fallback. |
-| **Claim directory** | A per-session directory at `<tmpdir>/vercel-plugin-<session>-seen-skills.d/` containing one empty file per already-injected skill. The primary layer of the three-layer dedup system. Cleaned up by `session-end-cleanup`. |
-| **Compiled pattern** | A `{ pattern: string, regex: RegExp }` pair produced at build time (for manifest entries) or at runtime (for live SKILL.md scanning). Glob patterns are converted to regex via `globToRegex()` in `patterns.mts`. |
-| **Dedup** | The deduplication system preventing the same skill from being injected more than once per session. Merges three state sources: atomic file claims, `VERCEL_PLUGIN_SEEN_SKILLS` env var, and a session file — unioned by `mergeSeenSkillStates()`. |
-| **Effective priority** | A skill's final ranking score after all boosts are applied: base `metadata.priority` (4–8) + profiler boost (+5) + vercel.json routing (±10) + special triggers (+40/+50). Higher values are injected first. |
-| **Frontmatter** | The YAML block between `---` delimiters at the top of each `SKILL.md` file. Contains `name`, `description`, `summary`, `metadata` (priority, patterns, prompt signals, validation rules). Parsed by `parseSimpleYaml` — not `js-yaml`. |
-| **Greenfield** | A project state detected by the profiler when the working directory is empty or lacks meaningful source files. Triggers automatic prioritization of the `bootstrap` skill. Signaled via `VERCEL_PLUGIN_GREENFIELD=true`. |
-| **Hook** | A TypeScript function registered in `hooks/hooks.json` that fires on a specific Claude Code lifecycle event (`SessionStart`, `PreToolUse`, `UserPromptSubmit`, `PostToolUse`, `SessionEnd`). Hooks decide what knowledge Claude receives and when. |
-| **Injection** | The act of inserting a skill's markdown body into Claude's `additionalContext` during a hook invocation. Gated by pattern matching, priority ranking, dedup checks, and budget limits. |
-| **Invocation ID** | An 8-character hex string (`randomBytes(4).toString("hex")`) shared across all logger instances within a single hook process. Used to correlate log lines from the same hook invocation. |
-| **Lexical index** | A fallback scoring system (`lexical-index.mts`) that tokenizes prompt text and matches against skill keywords when no prompt signals fire. Returns scored results above `VERCEL_PLUGIN_LEXICAL_RESULT_MIN_SCORE` (default 5.0). |
-| **Manifest** | The pre-compiled skill index at `generated/skill-manifest.json`. Built by `scripts/build-manifest.ts`, it converts glob patterns to regex at build time. Version 2 format with paired arrays (`pathPatterns` ↔ `pathRegexSources`, etc.). |
-| **mergeSeenSkillStates()** | The function that unions all three dedup state sources (claim directory files, env var, session file) into a single set of seen skill names. Ensures consistency even if one source is stale. |
-| **minScore** | The threshold a skill's prompt signal score must reach before it qualifies for injection via `UserPromptSubmit`. Default is **6**. Configured per-skill in `metadata.promptSignals.minScore`. |
-| **noneOf** | A prompt signal blocklist. If **any** `noneOf` term appears in the user's prompt, the skill's score is set to `-Infinity`, hard-suppressing it. Prevents false-positive injections. Defined in `metadata.promptSignals.noneOf`. |
-| **parseSimpleYaml** | The plugin's custom YAML parser (in `skill-map-frontmatter.mts`). Intentionally differs from `js-yaml`: bare `null` → string `"null"`, bare `true`/`false` → strings, unclosed `[` → scalar string, tab indentation → error. |
-| **Phrases** | Prompt signal keywords that score **+6** each via exact case-insensitive substring matching. The strongest single-term signal. Defined in `metadata.promptSignals.phrases`. |
-| **Profiler** | The `session-start-profiler` hook. Scans `package.json` dependencies, config files (`vercel.json`, `next.config.*`), and project structure at session start. Sets `VERCEL_PLUGIN_LIKELY_SKILLS` (comma-delimited), granting matched skills a **+5 priority boost**. |
-| **Prompt signals** | The scoring system in `UserPromptSubmit` that matches user prompt text against skill-defined keywords. Composed of `phrases` (+6), `allOf` (+4), `anyOf` (+1 capped at +2), and `noneOf` (hard suppress). Compiled by `prompt-patterns.mts`. |
-| **Session file** | A text file at `<tmpdir>/vercel-plugin-<session>-seen-skills.txt` containing a comma-delimited snapshot of seen skills. The second layer of the dedup system, synced from the claim directory. |
-| **Skill** | A self-contained knowledge module in `skills//SKILL.md`. Each has YAML frontmatter (defining when to inject) and a markdown body (the content injected into Claude's context). Skills are the unit of domain knowledge. The plugin ships 46 skills. |
-| **Skill map** | The in-memory `Map` built by `buildSkillMap()` from either the manifest or live SKILL.md files. Maps skill name → compiled patterns, priority, summary, and validation rules. |
-| **Summary fallback** | When a skill's full markdown body would exceed the remaining injection budget, the hook injects the skill's `summary` field instead — a compact one-line description that still provides useful context. |
-| **SyncHookJSONOutput** | The TypeScript type (from `@anthropic-ai/claude-agent-sdk`) defining the JSON structure hooks must return. Key fields: `additionalContext` (injected content), `env` (environment variable updates), `decision` (allow/block). |
-| **Template include** | The `{{include:skill:<skill>:<heading>}}` marker syntax used in `.md.tmpl` files. Resolved at build time by `scripts/build-from-skills.ts`, which extracts sections from SKILL.md files and compiles them into the output `.md` files. |
-| **TSX review trigger** | A special PreToolUse behavior: after `VERCEL_PLUGIN_REVIEW_THRESHOLD` (default 3) `.tsx` file edits, the `react-best-practices` skill is injected with a **+40 priority boost**. Counter tracked in `VERCEL_PLUGIN_TSX_EDIT_COUNT`. |
-| **Validation rules** | Per-skill `metadata.validate` entries that run during `PostToolUse` (Write/Edit). Each rule has a `pattern` (regex matched against file content), `message`, `severity` (error/warn), and optional `skipIfFileContains`. |
-| **vercel.json routing** | Priority adjustments (±10) applied by `vercel-config.mts` based on keys present in the project's `vercel.json`. For example, `rewrites` boosts `routing-middleware`; `crons` boosts `cron-jobs`. |
-
----
-
-## See Also
-
-- [Architecture Overview](./01-architecture-overview.md) — system diagram and core concepts
-- [Injection Pipeline](./02-injection-pipeline.md) — how pattern matching, ranking, and budget work together
-- [Operations & Debugging](./04-operations-debugging.md) — environment variables and troubleshooting
-- [Observability Guide](./observability.md) — log levels, structured logging, and audit trails
diff --git a/docs/hook-lifecycle.md b/docs/hook-lifecycle.md
deleted file mode 100644
index ad89f17..0000000
--- a/docs/hook-lifecycle.md
+++ /dev/null
@@ -1,944 +0,0 @@
-# Hook Lifecycle Deep Dive
-
-This document covers every hook entry point in `hooks/hooks.json`, organized by lifecycle phase. Each section includes input/output contracts, sequence diagrams, and implementation details.
-
----
-
-## Table of Contents
-
-1. [Lifecycle Overview](#lifecycle-overview)
-2. [SessionStart Phase](#sessionstart-phase)
- - [session-start-seen-skills](#1-session-start-seen-skills)
- - [session-start-profiler](#2-session-start-profiler)
- - [inject-claude-md](#3-inject-claude-md)
-3. [PreToolUse Phase](#pretooluse-phase)
- - [pretooluse-skill-inject](#4-pretooluse-skill-inject)
- - [pretooluse-subagent-spawn-observe](#5-pretooluse-subagent-spawn-observe)
-4. [UserPromptSubmit Phase](#userpromptsubmit-phase)
- - [user-prompt-submit-skill-inject](#6-user-prompt-submit-skill-inject)
-5. [PostToolUse Phase](#posttooluse-phase)
- - [posttooluse-shadcn-font-fix](#7-posttooluse-shadcn-font-fix)
- - [posttooluse-verification-observe](#8-posttooluse-verification-observe)
- - [posttooluse-validate](#9-posttooluse-validate)
-6. [SubagentStart Phase](#subagentstart-phase)
- - [subagent-start-bootstrap](#10-subagent-start-bootstrap)
-7. [SubagentStop Phase](#subagentstop-phase)
- - [subagent-stop-sync](#11-subagent-stop-sync)
-8. [SessionEnd Phase](#sessionend-phase)
- - [session-end-cleanup](#12-session-end-cleanup)
-9. [Hook I/O Contract](#hook-io-contract)
-10. [Custom YAML Parser Semantics](#custom-yaml-parser-semantics)
-11. [Environment Variables Reference](#environment-variables-reference)
-
----
-
-## Lifecycle Overview
-
-Every hook fires at a specific point in Claude Code's execution cycle. The following diagram shows the complete lifecycle from session start to session end, including all 12 hook entry points.
-
-```mermaid
-sequenceDiagram
- participant CC as Claude Code
- participant SS as SessionStart Hooks
- participant Agent as Agent (LLM)
- participant PTU as PreToolUse Hooks
- participant UPS as UserPromptSubmit Hook
- participant PostTU as PostToolUse Hooks
- participant SA as Subagent Hooks
- participant SE as SessionEnd Hook
-
- Note over CC,SE: Session Lifecycle
-
- CC->>SS: startup | resume | clear | compact
- activate SS
- SS-->>CC: Initialize dedup, profile project, inject vercel.md
- deactivate SS
-
- loop Every user prompt
- CC->>UPS: User types a prompt
- activate UPS
- UPS-->>CC: Score prompt signals -> inject 0-2 skills
- deactivate UPS
-
- loop Every tool call
- CC->>PTU: Agent calls Read/Edit/Write/Bash/Agent
- activate PTU
- PTU-->>CC: Match patterns -> inject 0-3 skills
- deactivate PTU
-
- Agent->>Agent: Tool executes
-
- CC->>PostTU: Tool completes (Write/Edit/Bash)
- activate PostTU
- PostTU-->>CC: Validate files / observe verification / fix fonts
- deactivate PostTU
- end
- end
-
- opt Agent spawns subagent
- CC->>SA: SubagentStart
- activate SA
- SA-->>CC: Bootstrap context (1-8KB by agent type)
- deactivate SA
-
- Note over SA: Subagent works...
-
- CC->>SA: SubagentStop
- activate SA
- SA-->>CC: Write ledger, sync dedup
- deactivate SA
- end
-
- CC->>SE: Session ends
- activate SE
- SE-->>CC: Delete all temp files
- deactivate SE
-```
-
----
-
-## SessionStart Phase
-
-These hooks fire once when a session begins, resumes, is cleared, or compacted. They set up the environment for all subsequent hooks.
-
-**Matcher**: `startup|resume|clear|compact`
-
-**Execution order**: Hooks run in the order listed in `hooks.json` — seen-skills first, then profiler, then inject-claude-md.
-
----
-
-### 1. session-start-seen-skills
-
-**Source**: `hooks/src/session-start-seen-skills.mts` (17 lines)
-**Timeout**: None
-**Output**: None (side-effect only)
-
-#### Purpose
-
-Initializes the dedup state by writing `VERCEL_PLUGIN_SEEN_SKILLS=""` to `CLAUDE_ENV_FILE`. This ensures the PreToolUse and UserPromptSubmit hooks start with a blank slate for skill dedup tracking.
-
-#### Sequence
-
-```mermaid
-sequenceDiagram
- participant CC as Claude Code
- participant Hook as session-start-seen-skills
- participant Env as CLAUDE_ENV_FILE
-
- CC->>Hook: SessionStart event (stdin: JSON)
- Hook->>Env: appendFileSync('export VERCEL_PLUGIN_SEEN_SKILLS=""')
- Hook-->>CC: exit 0 (no stdout)
-```
-
-#### Implementation Details
-
-- Reads `CLAUDE_ENV_FILE` from environment (required — `requireEnvFile()` exits if missing)
-- Appends a single `export` line — does not overwrite existing content
-- Failures are silently ignored (non-critical)
-- This must run **before** the profiler to ensure the env var exists when the profiler writes `LIKELY_SKILLS`
-
----
-
-### 2. session-start-profiler
-
-**Source**: `hooks/src/session-start-profiler.mts` (620 lines)
-**Timeout**: None
-**Output**: stdout text (CLI status messages), env var side-effects
-
-#### Purpose
-
-Scans the project's `package.json`, config files, directory structure, and Vercel CLI version to:
-1. Determine which skills are likely relevant (`VERCEL_PLUGIN_LIKELY_SKILLS`)
-2. Detect bootstrap/setup signals (`VERCEL_PLUGIN_BOOTSTRAP_HINTS`, `VERCEL_PLUGIN_SETUP_MODE`)
-3. Detect greenfield (empty) projects (`VERCEL_PLUGIN_GREENFIELD`)
-4. Check if `agent-browser` CLI is available (`VERCEL_PLUGIN_AGENT_BROWSER_AVAILABLE`)
-5. Report Vercel CLI installation and update status
-
-#### Sequence
-
-```mermaid
-sequenceDiagram
- participant CC as Claude Code
- participant Hook as session-start-profiler
- participant FS as File System
- participant Env as CLAUDE_ENV_FILE
- participant Cache as Profile Cache (tmpdir)
-
- CC->>Hook: SessionStart event (stdin: { session_id })
- Hook->>FS: Check greenfield (readdirSync)
- Hook->>FS: Scan FILE_MARKERS (next.config.*, vercel.json, etc.)
- Hook->>FS: Read package.json -> match PACKAGE_MARKERS
- Hook->>FS: Read vercel.json -> check crons, rewrites, functions
- Hook->>Hook: Detect bootstrap signals (env templates, prisma, drizzle, auth)
- Hook->>Hook: Check Vercel CLI version (vercel --version + npm view)
- Hook->>Hook: Check agent-browser on PATH
- Hook->>Env: Write VERCEL_PLUGIN_LIKELY_SKILLS
- Hook->>Env: Write VERCEL_PLUGIN_GREENFIELD (if empty)
- Hook->>Env: Write VERCEL_PLUGIN_SETUP_MODE (if hints >= 3)
- Hook->>Env: Write VERCEL_PLUGIN_BOOTSTRAP_HINTS
- Hook->>Env: Write VERCEL_PLUGIN_RESOURCE_HINTS
- Hook->>Env: Write VERCEL_PLUGIN_AGENT_BROWSER_AVAILABLE
- Hook->>Cache: Write profile.json (for subagent bootstrap)
- Hook-->>CC: stdout: CLI status messages (if outdated/missing)
-```
-
-#### File Markers
-
-The profiler checks for these files to determine likely skills:
-
-| File | Skills Detected |
-|------|-----------------|
-| `next.config.{js,mjs,ts,mts}` | `nextjs`, `turbopack` |
-| `turbo.json` | `turborepo` |
-| `vercel.json` | `vercel-cli`, `deployments-cicd`, `vercel-functions` |
-| `.mcp.json` | `vercel-api` |
-| `middleware.{ts,js}` | `routing-middleware` |
-| `components.json` | `shadcn` |
-| `.env.local` | `env-vars` |
-| `pnpm-workspace.yaml` | `turborepo` |
-
-#### Package Markers
-
-Dependencies in `package.json` map to skills:
-
-| Package | Skills |
-|---------|--------|
-| `next` | `nextjs` |
-| `ai`, `@ai-sdk/*` | `ai-sdk`, `ai-elements`, `ai-gateway` |
-| `@vercel/blob`, `@vercel/kv`, `@vercel/postgres`, `@vercel/edge-config` | `vercel-storage` |
-| `@vercel/analytics`, `@vercel/speed-insights` | `observability` |
-| `@vercel/flags` | `vercel-flags` |
-| `@vercel/workflow` | `workflow` |
-| `@vercel/queue` | `vercel-queues` |
-| `turbo` | `turborepo` |
-| `@repo/*`, `@t3-oss/env-nextjs` | `next-forge` |
-
-#### Bootstrap Signal Detection
-
-The profiler detects setup/bootstrap signals that trigger `VERCEL_PLUGIN_SETUP_MODE` when 3 or more hints are found:
-
-- **Env templates**: `.env.example`, `.env.sample`, `.env.template`
-- **README**: Any file starting with `readme`
-- **Database**: `drizzle.config.*`, `prisma/schema.prisma`, `db:push`/`db:seed` scripts
-- **Auth**: `next-auth`, `@auth/core`, `better-auth` dependencies
-- **Resources**: `@neondatabase/serverless`, `drizzle-orm`, `@upstash/redis`
-
-#### Greenfield Detection
-
-A project is greenfield if:
-- Every top-level entry is a dot-directory (`.git`, `.claude`)
-- No dot-files exist (`.env.local`, `.mcp.json` indicate real config)
-
-Greenfield projects get default skills: `nextjs`, `ai-sdk`, `vercel-cli`, `env-vars`.
-
----
-
-### 3. inject-claude-md
-
-**Source**: `hooks/src/inject-claude-md.mts` (33 lines)
-**Timeout**: None
-**Output**: stdout text (vercel.md content as additionalContext)
-
-#### Purpose
-
-Outputs the `vercel.md` ecosystem graph (~52KB) as `additionalContext`. This gives the agent a map of the entire Vercel ecosystem before any specific skills fire. If the project is greenfield, it also appends execution mode instructions.
-
-#### Sequence
-
-```mermaid
-sequenceDiagram
- participant CC as Claude Code
- participant Hook as inject-claude-md
- participant FS as File System
-
- CC->>Hook: SessionStart event
- Hook->>FS: Read vercel.md from plugin root
- alt Greenfield project
- Hook->>Hook: Append greenfield execution instructions
- Note over Hook: "Skip planning, choose defaults, start executing"
- end
- Hook-->>CC: stdout: vercel.md content (~52KB)
-```
-
----
-
-## PreToolUse Phase
-
-These hooks fire **before** a tool call executes. They can inject additional context or observe the pending action.
-
----
-
-### 4. pretooluse-skill-inject
-
-**Source**: `hooks/src/pretooluse-skill-inject.mts` (~1300 lines)
-**Matcher**: `Read|Edit|Write|Bash`
-**Timeout**: 5 seconds
-**Output**: JSON with `additionalContext`
-
-#### Purpose
-
-The main injection engine. When the agent calls Read, Edit, Write, or Bash, this hook:
-1. Parses the tool input (file path or bash command)
-2. Matches against all skills' `pathPatterns`, `bashPatterns`, and `importPatterns`
-3. Applies priority boosters (profiler, vercel.json, setup mode)
-4. Deduplicates against already-injected skills
-5. Injects up to 3 skills within an 18KB byte budget
-
-#### Sequence
-
-```mermaid
-sequenceDiagram
- participant CC as Claude Code
- participant Hook as pretooluse-skill-inject
- participant Manifest as skill-manifest.json
- participant Dedup as Dedup State
- participant Skills as SKILL.md files
-
- CC->>Hook: PreToolUse (stdin: { tool_name, tool_input, session_id })
- Hook->>Hook: parseInput -> extract file path or bash command
- Hook->>Manifest: Load skill map (prefer manifest over scanning)
- Hook->>Hook: compileSkillPatterns -> create regex matchers
- Hook->>Hook: matchPathWithReason / matchBashWithReason / matchImportWithReason
-    Hook->>Hook: Apply vercel.json routing (±10)
- Hook->>Hook: Apply profiler boost (+5 for LIKELY_SKILLS)
- Hook->>Hook: Apply setup mode boost (+50 if SETUP_MODE=1)
- Hook->>Hook: Check TSX review trigger (+40 after N edits)
- Hook->>Hook: Check dev server detection
- Hook->>Hook: rankEntries -> sort by final priority DESC
- Hook->>Dedup: mergeSeenSkillStates (env + file + claims)
- Hook->>Dedup: Filter already-seen skills
- loop For each ranked skill (up to 3, within 18KB)
- Hook->>Skills: Read SKILL.md body
- alt Body fits budget
- Hook->>Hook: Add full body to parts
- else Over budget
- Hook->>Hook: Add summary fallback
- end
- Hook->>Dedup: Atomic claim + update env var
- end
- Hook-->>CC: JSON { hookSpecificOutput: { additionalContext } }
-```
-
-#### Pipeline Stages
-
-The hook is organized as a testable pipeline:
-
-```
-parseInput -> loadSkills -> matchSkills -> deduplicateSkills -> injectSkills -> formatOutput
-```
-
-#### Special Triggers
-
-| Trigger | Condition | Effect |
-|---------|-----------|--------|
-| **TSX review** | After `VERCEL_PLUGIN_REVIEW_THRESHOLD` (default 3) `.tsx` edits | Injects `react-best-practices` with +40 priority boost |
-| **Dev server detection** | Bash command matches `next dev`, `npm run dev`, etc. | Boosts `agent-browser-verify` |
-| **Vercel env help** | First `vercel env` command | One-time injection of env-vars guidance |
-| **Setup mode** | `VERCEL_PLUGIN_SETUP_MODE=1` | +50 priority boost for matched skills |
-
-#### Input Schema
-
-```json
-{
- "tool_name": "Read|Edit|Write|Bash",
- "tool_input": {
- "file_path": "app/page.tsx",
- "command": "vercel deploy --prod"
- },
- "session_id": "abc-123",
- "cwd": "/Users/dev/my-app"
-}
-```
-
-#### Output Schema
-
-```json
-{
- "hookSpecificOutput": {
- "additionalContext": "\n[vercel-plugin] Best practices...\n\n\n..."
- }
-}
-```
-
----
-
-### 5. pretooluse-subagent-spawn-observe
-
-**Source**: `hooks/src/pretooluse-subagent-spawn-observe.mts` (141 lines)
-**Matcher**: `Agent`
-**Timeout**: 5 seconds
-**Output**: `{}` (observer only)
-
-#### Purpose
-
-Fires when the agent spawns a subagent. Records the pending launch metadata (description, prompt, subagent_type) in tmpdir so the `subagent-start-bootstrap` hook can correlate the launch with the right skill context.
-
-#### Sequence
-
-```mermaid
-sequenceDiagram
- participant CC as Claude Code
- participant Hook as pretooluse-subagent-spawn-observe
- participant State as Pending Launch State (tmpdir)
-
- CC->>Hook: PreToolUse (stdin: { tool_name: "Agent", tool_input: {...} })
- Hook->>Hook: parseInput -> validate Agent tool, extract session_id
- Hook->>Hook: buildPendingLaunchRecord(toolInput, timestamp)
- Hook->>State: appendPendingLaunch(sessionId, payload)
- Note over State: JSONL in /vercel-plugin--pending-launches/
- Hook-->>CC: "{}" (no mutation)
-```
-
-#### What Gets Recorded
-
-```json
-{
- "description": "Research authentication patterns",
- "prompt": "Find how auth is implemented in this codebase",
- "subagent_type": "Explore",
- "createdAt": 1710000000000,
- "name": "auth-researcher"
-}
-```
-
----
-
-## UserPromptSubmit Phase
-
-This hook fires when the user submits a prompt, before the agent processes it.
-
----
-
-### 6. user-prompt-submit-skill-inject
-
-**Source**: `hooks/src/user-prompt-submit-skill-inject.mts` (703 lines)
-**Matcher**: _(all prompts)_
-**Timeout**: 5 seconds
-**Output**: JSON with `additionalContext`
-
-#### Purpose
-
-Scores the user's prompt text against `promptSignals` defined in skill frontmatter. Injects up to 2 skills within an 8KB budget. Also handles troubleshooting intent routing and investigation companion selection.
-
-#### Sequence
-
-```mermaid
-sequenceDiagram
- participant CC as Claude Code
- participant Hook as user-prompt-submit
- participant Skills as Skill Map
- participant Dedup as Dedup State
- participant Analysis as Prompt Analysis
-
- CC->>Hook: UserPromptSubmit (stdin: { prompt, session_id })
- Hook->>Hook: parsePromptInput -> validate length >= 10 chars
- Hook->>Hook: normalizePromptText -> lowercase, expand contractions
- Hook->>Skills: loadSkills -> build skill map
- Hook->>Analysis: analyzePrompt -> score all skills with promptSignals
-
- loop For each skill with promptSignals
- Analysis->>Analysis: Score phrases (+6 each)
- Analysis->>Analysis: Score allOf groups (+4 per match)
- Analysis->>Analysis: Score anyOf terms (+1 each, cap +2)
- Analysis->>Analysis: Check noneOf (-Infinity if matched)
- Analysis->>Analysis: Compare score vs minScore (default 6)
- end
-
- Hook->>Hook: classifyTroubleshootingIntent
- alt Investigation mode triggered
- Hook->>Hook: selectInvestigationCompanion
- Note over Hook: Pick best from: workflow, agent-browser-verify, vercel-cli
- end
- alt Test framework mentioned
- Hook->>Hook: Suppress verification-family skills
- end
-
- Hook->>Dedup: Filter already-seen skills
- Hook->>Hook: Cap at 2 skills, enforce 8KB budget
-
- loop For each selected skill
- Hook->>Skills: Read SKILL.md body
- Hook->>Dedup: Atomic claim + sync
- end
-
- Hook-->>CC: JSON { hookSpecificOutput: { hookEventName: "UserPromptSubmit", additionalContext } }
-```
-
-#### Scoring Example
-
-Given a skill with:
-```yaml
-promptSignals:
- phrases: ["deploy to preview"] # +6
- allOf: [["deploy", "branch"]] # +4
- anyOf: ["ci", "github"] # +1 each, cap +2
- noneOf: ["rollback"]
- minScore: 6
-```
-
-- Prompt "how do I deploy to preview?" -> phrase match (+6) -> score 6 >= minScore 6 -> **matched**
-- Prompt "deploy my branch to CI" -> allOf (+4) + anyOf "ci" (+1) -> score 5 < minScore 6 -> **not matched**
-- Prompt "rollback the deploy" -> noneOf "rollback" -> score -Infinity -> **suppressed**
-
-#### Investigation Companion Selection
-
-When `investigation-mode` is selected, the hook picks the best companion skill:
-
-| Priority | Companion | When Selected |
-|----------|-----------|---------------|
-| 1st | `workflow` | Best score among companions |
-| 2nd | `agent-browser-verify` | If workflow doesn't match |
-| 3rd | `vercel-cli` | Fallback companion |
-
----
-
-## PostToolUse Phase
-
-These hooks fire **after** a tool call completes. They observe results, validate outputs, or apply fixes.
-
----
-
-### 7. posttooluse-shadcn-font-fix
-
-**Source**: `hooks/posttooluse-shadcn-font-fix.mjs` (standalone, no `.mts` source)
-**Matcher**: `Bash`
-**Timeout**: 5 seconds
-**Output**: JSON with `additionalContext` (fix instructions)
-
-#### Purpose
-
-After a Bash command completes, detects and fixes shadcn font loading issues. This is a standalone hook with no TypeScript source — it's a simple pattern-match-and-fix.
-
----
-
-### 8. posttooluse-verification-observe
-
-**Source**: `hooks/src/posttooluse-verification-observe.mts` (285 lines)
-**Matcher**: `Bash`
-**Timeout**: 5 seconds
-**Output**: `{}` (observer only — emits structured log events)
-
-#### Purpose
-
-After a Bash command completes, classifies the command into a verification boundary type and emits structured log events. This powers the verification pipeline that tracks whether the agent is testing at all system boundaries.
-
-#### Boundary Classification
-
-| Boundary | Pattern Examples | Label |
-|----------|-----------------|-------|
-| `uiRender` | `open`, `screenshot`, `playwright`, `puppeteer` | Browser/UI interaction |
-| `clientRequest` | `curl`, `wget`, `fetch(`, `httpie` | HTTP client requests |
-| `serverHandler` | `tail -f *.log`, `vercel logs`, port inspection | Server/log inspection |
-| `environment` | `printenv`, `vercel env`, `cat .env` | Environment reads |
-
-#### Story Inference
-
-The hook infers the target route from two sources (in priority order):
-1. `VERCEL_PLUGIN_RECENT_EDITS` — file paths recently edited, e.g. `app/settings/page.tsx` -> `/settings`
-2. URL patterns in the command itself, e.g. `curl http://localhost:3000/api/data` -> `/api/data`
-
-#### Sequence
-
-```mermaid
-sequenceDiagram
- participant CC as Claude Code
- participant Hook as posttooluse-verification-observe
- participant Log as Structured Logger
-
- CC->>Hook: PostToolUse (stdin: { tool_name: "Bash", tool_input: { command } })
- Hook->>Hook: parseInput -> extract command
- Hook->>Hook: classifyBoundary(command)
- alt Boundary matched
- Hook->>Hook: inferRoute(command, RECENT_EDITS)
- Hook->>Log: Emit verification.boundary_observed event
- Note over Log: { boundary, verificationId, command, inferredRoute }
- end
- Hook-->>CC: "{}" (observer only)
-```
-
----
-
-### 9. posttooluse-validate
-
-**Source**: `hooks/src/posttooluse-validate.mts` (550 lines)
-**Matcher**: `Write|Edit`
-**Timeout**: 5 seconds
-**Output**: JSON with `additionalContext` (validation violations)
-
-#### Purpose
-
-After a Write or Edit, matches the target file against skills and runs any `validate` rules defined in skill frontmatter. Returns fix instructions if validation fails.
-
-#### Sequence
-
-```mermaid
-sequenceDiagram
- participant CC as Claude Code
- participant Hook as posttooluse-validate
- participant Skills as Skill Map
- participant FS as File System
- participant Dedup as Validation Dedup
-
- CC->>Hook: PostToolUse (stdin: { tool_name: "Write", tool_input: { file_path } })
- Hook->>Hook: parseInput -> extract file path
- Hook->>FS: Read file content from disk
- Hook->>Dedup: Check file+hash already validated?
- alt Already validated (same content)
- Hook-->>CC: "{}" (skip)
- end
- Hook->>Skills: loadValidateRules -> filter skills with validate: rules
- Hook->>Hook: matchFileToSkills -> match by path globs + import patterns
- loop For each matched skill's validate rules
- Hook->>Hook: Check skipIfFileContains regex
- Hook->>Hook: Run pattern regex against each line
- alt Pattern matches
- Hook->>Hook: Record violation (line, message, severity)
- end
- end
- Hook->>Dedup: Mark file+hash as validated
- Hook-->>CC: JSON with violations or "{}"
-```
-
-#### Validation Rule Format
-
-```yaml
-validate:
- - pattern: "executeRaw\\("
- message: "Use $queryRaw for type safety instead of executeRaw"
- severity: "error"
- skipIfFileContains: "\\$queryRaw"
-```
-
-- **`pattern`**: Regex matched against each line of the file
-- **`message`**: Error description returned to the agent
-- **`severity`**: `error` (mandatory fix) or `warn` (suggestion)
-- **`skipIfFileContains`**: If this regex matches anywhere in the file, skip this rule
-
-#### Validation Dedup
-
-Tracks `file_path:content_hash` pairs in `VERCEL_PLUGIN_VALIDATED_FILES` to avoid re-validating unchanged files. Uses MD5 hash (first 12 hex chars) for fast comparison.
-
----
-
-## SubagentStart Phase
-
-This hook fires when any subagent starts.
-
----
-
-### 10. subagent-start-bootstrap
-
-**Source**: `hooks/src/subagent-start-bootstrap.mts` (427 lines)
-**Matcher**: `.+` (any subagent)
-**Timeout**: 5 seconds
-**Output**: JSON with `additionalContext`
-
-#### Purpose
-
-When any subagent starts, bootstraps it with relevant skill context. The context size is tailored to the agent type:
-
-| Agent Type | Budget | Content Strategy |
-|------------|--------|------------------|
-| `Explore` | 1KB (minimal) | Project profile line + skill name list |
-| `Plan` | 3KB (light) | Profile + skill summaries + deployment constraints |
-| `general-purpose` | 8KB (standard) | Profile + full skill bodies (with summary fallback) |
-| Other/custom | 8KB (standard) | Same as general-purpose |
-
-#### Sequence
-
-```mermaid
-sequenceDiagram
- participant CC as Claude Code
- participant Hook as subagent-start-bootstrap
- participant Cache as Profile Cache
- participant State as Pending Launch State
- participant Skills as Skill Map
- participant Dedup as Dedup Claims
-
- CC->>Hook: SubagentStart (stdin: { session_id, agent_id, agent_type })
- Hook->>Cache: Read profiler cache (profile.json)
- alt Cache hit
- Hook->>Hook: Use cached likelySkills
- else Cache miss
- Hook->>Hook: Fallback to VERCEL_PLUGIN_LIKELY_SKILLS env var
- end
- Hook->>State: claimPendingLaunch(sessionId, agentType)
- alt Pending launch found
- Hook->>Hook: Match prompt text against skill signals
- Hook->>Hook: Merge prompt-matched skills with profiler skills
- end
- Hook->>Hook: resolveBudgetCategory(agentType)
- alt Minimal (Explore)
- Hook->>Hook: buildMinimalContext (profile + skill names)
- else Light (Plan)
- Hook->>Skills: Load skill summaries within 3KB
- Hook->>Hook: buildLightContext (profile + summaries + constraints)
- else Standard (general-purpose)
- Hook->>Skills: Load full SKILL.md bodies within 8KB
- Hook->>Hook: buildStandardContext (profile + full bodies)
- end
- Hook->>Dedup: Claim injected skills (scoped by agentId)
- Hook-->>CC: JSON { hookSpecificOutput: { hookEventName: "SubagentStart", additionalContext } }
-```
-
-#### Pending Launch Correlation
-
-The hook reads the pending launch directory written by `pretooluse-subagent-spawn-observe` to extract the subagent's description and prompt. It then runs prompt signal matching against this text to determine additional relevant skills beyond what the profiler detected.
-
----
-
-## SubagentStop Phase
-
-This hook fires when any subagent stops.
-
----
-
-### 11. subagent-stop-sync
-
-**Source**: `hooks/src/subagent-stop-sync.mts` (141 lines)
-**Matcher**: `.+` (any subagent)
-**Timeout**: 5 seconds
-**Output**: None (side-effect only)
-
-#### Purpose
-
-When any subagent stops, writes a JSONL ledger entry for observability and counts the skills injected for that agent.
-
-#### Sequence
-
-```mermaid
-sequenceDiagram
- participant CC as Claude Code
- participant Hook as subagent-stop-sync
- participant Ledger as Ledger File (JSONL)
- participant Dedup as Dedup Claims
-
- CC->>Hook: SubagentStop (stdin: { session_id, agent_id, agent_type, agent_transcript_path })
- Hook->>Ledger: Append JSONL record
- Note over Ledger: /vercel-plugin--subagent-ledger.jsonl
- Hook->>Dedup: Count skills injected for this agent (scoped claims)
- Hook->>Hook: Log summary (agent_id, agent_type, skills_injected)
- Hook-->>CC: exit 0 (no stdout)
-```
-
-#### Ledger Entry Format
-
-```json
-{
- "timestamp": "2026-03-10T12:00:00.000Z",
- "session_id": "abc-123",
- "agent_id": "agent-456",
- "agent_type": "Explore",
- "agent_transcript_path": "/path/to/transcript"
-}
-```
-
----
-
-## SessionEnd Phase
-
-This hook fires when the session ends.
-
----
-
-### 12. session-end-cleanup
-
-**Source**: `hooks/src/session-end-cleanup.mts` (81 lines)
-**Matcher**: None (fires on all session ends)
-**Timeout**: None
-**Output**: None (side-effect only)
-
-#### Purpose
-
-Best-effort cleanup of all session-scoped temporary files. Always exits successfully, even if cleanup fails.
-
-#### What Gets Cleaned Up
-
-| Path Pattern | Type | Contents |
-|-------------|------|----------|
-| `<tmpdir>/vercel-plugin-<session>-seen-skills.d/` | Directory | Atomic skill claim files |
-| `<tmpdir>/vercel-plugin-<session>-seen-skills.txt` | File | Comma-delimited seen skills |
-| `<tmpdir>/vercel-plugin-<session>-pending-launches/` | Directory | Subagent pending launch records |
-| `<tmpdir>/vercel-plugin-<session>-subagent-ledger.jsonl` | File | Subagent lifecycle ledger |
-| `<tmpdir>/vercel-plugin-<session>-profile.json` | File | Profiler cache |
-| `<tmpdir>/vercel-plugin-<session>-validated-files.txt` | File | Validation dedup state |
-
-#### Sequence
-
-```mermaid
-sequenceDiagram
- participant CC as Claude Code
- participant Hook as session-end-cleanup
- participant FS as File System (tmpdir)
-
- CC->>Hook: SessionEnd (stdin: { session_id })
- Hook->>Hook: Parse session_id from stdin
- Hook->>Hook: Hash session_id if non-alphanumeric
- Hook->>FS: readdirSync(tmpdir) -> filter by prefix
- loop For each matching entry
- alt Entry ends with .d or -pending-launches
- Hook->>FS: rmSync(path, { recursive: true })
- else Regular file
- Hook->>FS: unlinkSync(path)
- end
- end
- Hook-->>CC: exit 0 (always succeeds)
-```
-
----
-
-## Hook I/O Contract
-
-All hooks follow the same I/O contract defined by `SyncHookJSONOutput` from `@anthropic-ai/claude-agent-sdk`:
-
-### Input (stdin)
-
-```json
-{
- "tool_name": "Read",
- "tool_input": { "file_path": "app/page.tsx" },
- "session_id": "abc-123",
- "cwd": "/Users/dev/my-app",
- "hook_event_name": "PreToolUse"
-}
-```
-
-For `UserPromptSubmit`:
-```json
-{
- "prompt": "How do I deploy to preview?",
- "session_id": "abc-123",
- "cwd": "/Users/dev/my-app",
- "hook_event_name": "UserPromptSubmit"
-}
-```
-
-For `SubagentStart` / `SubagentStop`:
-```json
-{
- "session_id": "abc-123",
- "cwd": "/Users/dev/my-app",
- "agent_id": "agent-456",
- "agent_type": "Explore",
- "hook_event_name": "SubagentStart"
-}
-```
-
-### Output (stdout)
-
-Hooks that inject context return:
-```json
-{
- "hookSpecificOutput": {
- "hookEventName": "PreToolUse",
- "additionalContext": "\n..."
- }
-}
-```
-
-Observer-only hooks and hooks with no matches return:
-```json
-{}
-```
-
-### Error Handling
-
-All hooks follow defensive patterns:
-- Catch all errors and log to stderr
-- Always write valid JSON to stdout (at minimum `{}`)
-- Never crash the Claude Code session — graceful degradation is preferred
-- Timeouts (5s) kill the hook process; Claude Code continues without the hook's output
-
----
-
-## Custom YAML Parser Semantics
-
-The plugin uses `parseSimpleYaml` (in `hooks/src/skill-map-frontmatter.mts`), a custom inline YAML parser purpose-built for skill frontmatter. It is **not** `js-yaml`.
-
-### Why a Custom Parser?
-
-Skill frontmatter values are always used as strings for pattern matching. The standard YAML spec converts values like `null`, `true`, and `false` to their JavaScript equivalents, which would break pattern matching.
-
-### Behavioral Differences
-
-| Input | Standard YAML (js-yaml) | vercel-plugin parser | Rationale |
-|-------|------------------------|---------------------|-----------|
-| Bare `null` | JavaScript `null` | String `"null"` | Patterns should always be strings |
-| Bare `true` | JavaScript `true` | String `"true"` | No type coercion |
-| Bare `false` | JavaScript `false` | String `"false"` | No type coercion |
-| Unclosed `[items` | Parse error (throws) | Scalar string `"[items"` | Graceful degradation |
-| Tab indentation | Allowed | **Explicit error thrown** | Prevents hard-to-debug whitespace issues |
-| `---` delimiters | Standard | Standard | Same behavior |
-| Nested objects | Full support | Indentation-based nesting | Same behavior |
-| Array items (`- item`) | Standard | Standard | Same behavior |
-| Inline arrays (`[a, b]`) | Standard | Standard | Same behavior |
-
-### Tab Error Example
-
-```yaml
----
-name: my-skill
-metadata:
- priority: 6 # <-- Tab character: parser throws explicit error
----
-```
-
-The parser will throw with a message indicating the tab character and line number, making it easy to find and fix.
-
-### Frontmatter Extraction
-
-The `extractFrontmatter()` function splits a SKILL.md into:
-- `yaml`: The raw YAML string between `---` delimiters
-- `body`: The markdown content after the closing `---`
-
-The `buildSkillMap()` function reads all `skills/*/SKILL.md` files, extracts frontmatter, parses it with `parseSimpleYaml`, validates the structure, and returns a `Record` keyed by skill slug.
-
----
-
-## Environment Variables Reference
-
-### Plugin-Controlled Variables
-
-These are set and read by the plugin's hooks. Writers and readers are listed to show data flow.
-
-| Variable | Default | Writer(s) | Reader(s) | Lifecycle |
-|----------|---------|-----------|-----------|-----------|
-| `VERCEL_PLUGIN_SEEN_SKILLS` | `""` | `session-start-seen-skills` (init), `pretooluse-skill-inject` (append), `user-prompt-submit` (append) | `pretooluse-skill-inject`, `user-prompt-submit` | Session-scoped |
-| `VERCEL_PLUGIN_LIKELY_SKILLS` | — | `session-start-profiler` | `pretooluse-skill-inject`, `subagent-start-bootstrap` | Session-scoped |
-| `VERCEL_PLUGIN_GREENFIELD` | — | `session-start-profiler` | `inject-claude-md` | Session-scoped |
-| `VERCEL_PLUGIN_SETUP_MODE` | — | `session-start-profiler` | `pretooluse-skill-inject` | Session-scoped |
-| `VERCEL_PLUGIN_BOOTSTRAP_HINTS` | — | `session-start-profiler` | — | Session-scoped |
-| `VERCEL_PLUGIN_RESOURCE_HINTS` | — | `session-start-profiler` | — | Session-scoped |
-| `VERCEL_PLUGIN_AGENT_BROWSER_AVAILABLE` | — | `session-start-profiler` | `pretooluse-skill-inject` | Session-scoped |
-| `VERCEL_PLUGIN_TSX_EDIT_COUNT` | `0` | `pretooluse-skill-inject` | `pretooluse-skill-inject` | Session-scoped, counter |
-| `VERCEL_PLUGIN_DEV_VERIFY_COUNT` | `0` | `pretooluse-skill-inject` | `pretooluse-skill-inject` | Session-scoped, counter |
-| `VERCEL_PLUGIN_DEV_COMMAND` | — | `pretooluse-skill-inject` | `pretooluse-skill-inject` | Session-scoped |
-| `VERCEL_PLUGIN_VALIDATED_FILES` | — | `posttooluse-validate` | `posttooluse-validate` | Session-scoped |
-| `VERCEL_PLUGIN_RECENT_EDITS` | — | `pretooluse-skill-inject` | `posttooluse-verification-observe` | Session-scoped |
-
-### User-Configurable Variables
-
-These can be set by the user to customize plugin behavior.
-
-| Variable | Default | Effect |
-|----------|---------|--------|
-| `VERCEL_PLUGIN_LOG_LEVEL` | `off` | Logging verbosity: `off`, `summary`, `debug`, `trace` |
-| `VERCEL_PLUGIN_DEBUG` | — | Legacy: `1` maps to `debug` level |
-| `VERCEL_PLUGIN_HOOK_DEBUG` | — | Legacy: `1` maps to `debug` level |
-| `VERCEL_PLUGIN_HOOK_DEDUP` | — | `off` to disable dedup entirely |
-| `VERCEL_PLUGIN_INJECTION_BUDGET` | `18000` | PreToolUse byte budget (bytes) |
-| `VERCEL_PLUGIN_PROMPT_INJECTION_BUDGET` | `8000` | UserPromptSubmit byte budget (bytes) |
-| `VERCEL_PLUGIN_REVIEW_THRESHOLD` | `3` | Number of TSX edits before injecting `react-best-practices` |
-| `VERCEL_PLUGIN_AUDIT_LOG_FILE` | — | Path to audit log file, or `off` to disable |
-| `VERCEL_PLUGIN_LEXICAL_RESULT_MIN_SCORE` | `5.0` | Minimum score for lexical fallback results |
-
-### Claude Code-Provided Variables
-
-These are set by Claude Code itself and used by hooks.
-
-| Variable | Description |
-|----------|-------------|
-| `CLAUDE_ENV_FILE` | Path to env file for persisting variables across hook invocations |
-| `CLAUDE_PLUGIN_ROOT` | Root directory of the plugin installation |
-| `CLAUDE_PROJECT_ROOT` | Root directory of the user's project |
-| `SESSION_ID` | Fallback session ID (used when not provided in stdin) |
diff --git a/docs/observability.md b/docs/observability.md
deleted file mode 100644
index 6bc26cb..0000000
--- a/docs/observability.md
+++ /dev/null
@@ -1,436 +0,0 @@
-# Observability Guide
-
-How to monitor, debug, and trace the vercel-plugin's skill injection behavior. Covers the structured logging system, audit log configuration, and dedup debugging strategies.
-
----
-
-## Table of Contents
-
-1. [Log Levels](#log-levels)
- - [off (default)](#off-default)
- - [summary](#summary)
- - [debug](#debug)
- - [trace](#trace)
-2. [Structured JSON Logging](#structured-json-logging)
- - [Log Format](#log-format)
- - [Common Event Types](#common-event-types)
- - [Invocation ID Correlation](#invocation-id-correlation)
-3. [Audit Log File](#audit-log-file)
- - [Configuration](#audit-log-configuration)
- - [Log Location](#audit-log-location)
- - [Format and Contents](#audit-log-format)
-4. [Dedup Debugging](#dedup-debugging)
- - [Dedup Architecture](#dedup-architecture)
- - [Strategy Fallback Chain](#strategy-fallback-chain)
- - [Inspecting Dedup State](#inspecting-dedup-state)
- - [Common Dedup Issues](#common-dedup-issues)
-5. [Environment Variable Quick Reference](#environment-variable-quick-reference)
-6. [Debugging Decision Tree](#debugging-decision-tree)
-
----
-
-## Log Levels
-
-The plugin uses a four-tier log level system, controlled by `VERCEL_PLUGIN_LOG_LEVEL`. All log output goes to **stderr** as structured JSON — it never contaminates hook stdout (which is reserved for `SyncHookJSONOutput`).
-
-### Level Resolution Order
-
-The logger checks environment variables in this order:
-
-1. `VERCEL_PLUGIN_LOG_LEVEL` — explicit level name (`off`, `summary`, `debug`, `trace`)
-2. `VERCEL_PLUGIN_DEBUG=1` — legacy flag, maps to `debug`
-3. `VERCEL_PLUGIN_HOOK_DEBUG=1` — legacy flag, maps to `debug`
-4. Falls back to `off` if nothing is set
-
-If `VERCEL_PLUGIN_LOG_LEVEL` is set to an unrecognized value, the logger prints a warning to stderr and falls back to `off`.
-
-### off (default)
-
-```bash
-# No environment variable needed — this is the default
-unset VERCEL_PLUGIN_LOG_LEVEL
-```
-
-No log output. Hooks run silently, producing only their JSON result on stdout. This is the production default to avoid any performance overhead.
-
-### summary
-
-```bash
-export VERCEL_PLUGIN_LOG_LEVEL=summary
-```
-
-Emits outcome, latency, and issue reports. Best for monitoring hook health without noise.
-
-**Example output** (one line per event, pretty-printed here for readability):
-
-```json
-{
- "invocationId": "a3f1c02e",
- "event": "complete",
- "timestamp": "2026-03-10T14:22:01.123Z",
- "reason": "injected",
- "matchedCount": 4,
- "injectedCount": 2,
- "dedupedCount": 1,
- "cappedCount": 1,
- "injectedSkills": ["nextjs", "vercel-functions"],
- "droppedByCap": ["routing-middleware"],
- "elapsed_ms": 12
-}
-```
-
-The `complete` event fires at the end of every hook invocation and summarizes what happened:
-
-| Field | Description |
-|-------|-------------|
-| `matchedCount` | Skills whose patterns matched the trigger |
-| `injectedCount` | Skills actually injected into context |
-| `dedupedCount` | Skills skipped because already seen this session |
-| `cappedCount` | Skills dropped by the per-invocation cap (5 for PreToolUse, 2 for UserPromptSubmit) |
-| `droppedByCap` | Names of cap-dropped skills |
-| `droppedByBudget` | Names of budget-dropped skills |
-| `boostsApplied` | Description of priority boosts that fired |
-| `elapsed_ms` | Wall-clock time for the entire hook |
-
-**Issue events** appear when something went wrong:
-
-```json
-{
- "invocationId": "a3f1c02e",
- "event": "issue",
- "timestamp": "2026-03-10T14:22:01.125Z",
- "code": "DEDUP_CLAIM_FAIL",
- "message": "Could not create claim file",
- "hint": "Check /tmp permissions",
- "context": { "skill": "nextjs", "errno": -13 }
-}
-```
-
-### debug
-
-```bash
-export VERCEL_PLUGIN_LOG_LEVEL=debug
-```
-
-Adds match reasons, dedup state, skill map statistics, and decision traces. Use this when investigating **why** a specific skill was or wasn't injected.
-
-**Additional events at debug level:**
-
-```json
-{
- "invocationId": "a3f1c02e",
- "event": "decision:match",
- "timestamp": "2026-03-10T14:22:01.110Z",
- "hook": "pretooluse-skill-inject",
- "skill": "nextjs",
- "score": 11,
- "reason": "pathPattern matched: app/**/page.tsx"
-}
-```
-
-```json
-{
- "invocationId": "a3f1c02e",
- "event": "decision:dedup",
- "timestamp": "2026-03-10T14:22:01.111Z",
- "hook": "pretooluse-skill-inject",
- "skill": "vercel-storage",
- "reason": "already seen (claim file exists)"
-}
-```
-
-```json
-{
- "invocationId": "a3f1c02e",
- "event": "decision:boost",
- "timestamp": "2026-03-10T14:22:01.112Z",
- "hook": "pretooluse-skill-inject",
- "skill": "nextjs",
- "score": 16,
- "reason": "profiler boost +5"
-}
-```
-
-### trace
-
-```bash
-export VERCEL_PLUGIN_LOG_LEVEL=trace
-```
-
-Maximum verbosity. Adds per-pattern evaluation details — every glob, regex, and import pattern tested against the current trigger. **Use sparingly** — this generates significant output, especially in projects with many skills.
-
-**Example trace events:**
-
-```json
-{
- "invocationId": "a3f1c02e",
- "event": "pattern:test",
- "timestamp": "2026-03-10T14:22:01.105Z",
- "skill": "vercel-storage",
- "patternType": "path",
- "pattern": "**/*.prisma",
- "input": "app/api/route.ts",
- "matched": false
-}
-```
-
-At trace level, you see every pattern the engine evaluates, making it possible to understand exactly why a skill did or didn't match.
-
----
-
-## Structured JSON Logging
-
-### Log Format
-
-Every log line is a single JSON object written to **stderr** via `process.stderr.write()`. Lines are newline-delimited (JSONL format).
-
-**Standard fields present on every log line:**
-
-| Field | Type | Description |
-|-------|------|-------------|
-| `invocationId` | `string` | 8-char hex ID shared across all log lines from one hook process |
-| `event` | `string` | Event name (e.g., `complete`, `decision:match`, `pattern:test`) |
-| `timestamp` | `string` | ISO 8601 timestamp |
-
-Additional fields vary by event type. The logger never emits log lines when the level is `off`.
-
-### Common Event Types
-
-| Event | Level | Description |
-|-------|-------|-------------|
-| `complete` | summary | Hook invocation summary with counts and timing |
-| `issue` | summary | Warning or error encountered during execution |
-| `decision:match` | debug | A skill matched the current trigger |
-| `decision:dedup` | debug | A skill was skipped due to dedup |
-| `decision:boost` | debug | A priority boost was applied |
-| `decision:budget` | debug | A skill was dropped or summarized due to budget |
-| `decision:suppress` | debug | A skill was suppressed (e.g., `noneOf` match) |
-| `pattern:test` | trace | Individual pattern evaluation result |
-| `prompt:score` | debug | Prompt signal scoring breakdown for a skill |
-
-### Invocation ID Correlation
-
-All hook modules running in the same Node.js process share a single `invocationId`. This is stored on `globalThis` via a shared key, so even if multiple modules import and create their own logger, they all emit the same ID.
-
-To filter logs for a single hook invocation:
-
-```bash
-# Filter by invocation ID
-cat /dev/stderr 2>&1 | grep '"invocationId":"a3f1c02e"' | jq .
-```
-
----
-
-## Audit Log File
-
-The audit log provides a persistent, append-only record of every skill injection decision. Unlike stderr logging (which is ephemeral), the audit log writes to a file on disk.
-
-### Audit Log Configuration
-
-| Variable | Value | Behavior |
-|----------|-------|----------|
-| `VERCEL_PLUGIN_AUDIT_LOG_FILE` | *(unset)* | Writes to default location (see below) |
-| `VERCEL_PLUGIN_AUDIT_LOG_FILE` | `/path/to/file.jsonl` | Writes to the specified path (resolved relative to project root) |
-| `VERCEL_PLUGIN_AUDIT_LOG_FILE` | `off` | Disables audit logging entirely |
-
-```bash
-# Use the default location
-unset VERCEL_PLUGIN_AUDIT_LOG_FILE
-
-# Write to a custom path
-export VERCEL_PLUGIN_AUDIT_LOG_FILE=./logs/plugin-audit.jsonl
-
-# Disable audit logging
-export VERCEL_PLUGIN_AUDIT_LOG_FILE=off
-```
-
-### Audit Log Location
-
-When no explicit path is configured, the audit log writes to:
-
-```
-~/.claude/projects/<project-slug>/vercel-plugin/skill-injections.jsonl
-```
-
-Where `<project-slug>` is the project root path with `/` replaced by `-`. The directory is created automatically if it doesn't exist.
-
-The project root is resolved in order: `CLAUDE_PROJECT_ROOT` > hook input `cwd` > `process.cwd()`.
-
-### Audit Log Format
-
-Each line is a JSON object (JSONL format) recording what was injected and why. The audit log captures the same structured data as the `complete` event at summary level, plus additional context about the trigger.
-
----
-
-## Dedup Debugging
-
-The dedup system prevents the same skill from being injected twice in a single Claude Code session. It uses a three-layer architecture for resilience.
-
-### Dedup Architecture
-
-```mermaid
-flowchart TD
- subgraph "Three-Layer State"
-    A["Claim Directory<br/>/vercel-plugin--seen-skills.d/<br/>One empty file per skill"]
-    B["Session File<br/>/vercel-plugin--seen-skills.txt<br/>Comma-delimited snapshot"]
-    C["Env Var<br/>VERCEL_PLUGIN_SEEN_SKILLS<br/>Comma-delimited in CLAUDE_ENV_FILE"]
- end
-
- D["mergeSeenSkillStates()"] --> E["Union of all three sources"]
- A --> D
- B --> D
- C --> D
-
- F["session-start-seen-skills"] -->|"Initializes"| C
- G["pretooluse-skill-inject"] -->|"Creates claims"| A
- G -->|"Updates"| B
- G -->|"Updates"| C
- H["user-prompt-submit"] -->|"Creates claims"| A
- H -->|"Updates"| B
- H -->|"Updates"| C
- I["session-end-cleanup"] -->|"Deletes"| A
- I -->|"Deletes"| B
-```
-
-### Strategy Fallback Chain
-
-The dedup system uses a strategy chain. If the primary strategy fails, it falls back to the next:
-
-| Strategy | Mechanism | When Used | Pros | Cons |
-|----------|-----------|-----------|------|------|
-| **file** | Atomic claims via `O_EXCL` | Default — `/tmp` writable | Race-condition safe, survives process restarts | Requires writable `/tmp` |
-| **env-var** | `VERCEL_PLUGIN_SEEN_SKILLS` in `CLAUDE_ENV_FILE` | Fallback if `/tmp` fails | No filesystem dependency | Can drift if env file updates race |
-| **memory-only** | In-memory `Set` within a single process | Fallback if env file unavailable | Zero I/O | Lost between hook invocations |
-| **disabled** | No dedup at all | `VERCEL_PLUGIN_HOOK_DEDUP=off` | Useful for testing | Skills may inject multiple times |
-
-To force a specific strategy for debugging:
-
-```bash
-# Disable dedup entirely (skills inject every time they match)
-export VERCEL_PLUGIN_HOOK_DEDUP=off
-
-# Watch which strategy the dedup system selects (requires debug level)
-export VERCEL_PLUGIN_LOG_LEVEL=debug
-```
-
-At debug level, the logger emits `decision:dedup` events showing which strategy was used and whether a skill was already seen.
-
-### Inspecting Dedup State
-
-**Check the claim directory:**
-
-```bash
-# Find the claim directory for the current session
-ls /tmp/vercel-plugin-*-seen-skills.d/
-
-# List all claimed (already-injected) skills
-ls /tmp/vercel-plugin-*-seen-skills.d/ 2>/dev/null
-```
-
-**Check the session file:**
-
-```bash
-cat /tmp/vercel-plugin-*-seen-skills.txt 2>/dev/null
-```
-
-**Check the env var:**
-
-```bash
-echo $VERCEL_PLUGIN_SEEN_SKILLS
-```
-
-**Force a clean slate** (resets dedup for the current session):
-
-```bash
-# Remove claim directory and session file
-rm -rf /tmp/vercel-plugin-*-seen-skills.d/
-rm -f /tmp/vercel-plugin-*-seen-skills.txt
-```
-
-### Common Dedup Issues
-
-| Symptom | Likely Cause | Fix |
-|---------|-------------|-----|
-| Skill injects every time | Dedup disabled (`VERCEL_PLUGIN_HOOK_DEDUP=off`) or `/tmp` not writable | Check env var; verify `/tmp` permissions |
-| Skill never injects after first time | Working correctly — this is expected behavior | If you need re-injection, clear dedup state (see above) |
-| Skill injects in PreToolUse but also in UserPromptSubmit | Claim directory not shared between hooks | Check that both hooks resolve the same session ID from `CLAUDE_SESSION_ID` |
-| Stale claim files from old sessions | `session-end-cleanup` didn't run (e.g., Claude Code crashed) | Manually delete old `/tmp/vercel-plugin-*` files |
-
----
-
-## Environment Variable Quick Reference
-
-| Variable | Default | Purpose |
-|----------|---------|---------|
-| `VERCEL_PLUGIN_LOG_LEVEL` | `off` | Log verbosity: `off` / `summary` / `debug` / `trace` |
-| `VERCEL_PLUGIN_DEBUG` | — | Legacy: `1` maps to `debug` level |
-| `VERCEL_PLUGIN_HOOK_DEBUG` | — | Legacy: `1` maps to `debug` level |
-| `VERCEL_PLUGIN_AUDIT_LOG_FILE` | *(auto)* | Audit log path, or `off` to disable |
-| `VERCEL_PLUGIN_HOOK_DEDUP` | — | Set to `off` to disable dedup entirely |
-| `VERCEL_PLUGIN_SEEN_SKILLS` | `""` | Comma-delimited seen skills (managed by hooks) |
-| `VERCEL_PLUGIN_LIKELY_SKILLS` | — | Profiler-set skills (+5 boost) |
-| `VERCEL_PLUGIN_INJECTION_BUDGET` | `18000` | PreToolUse byte budget |
-| `VERCEL_PLUGIN_PROMPT_INJECTION_BUDGET` | `8000` | UserPromptSubmit byte budget |
-| `VERCEL_PLUGIN_REVIEW_THRESHOLD` | `3` | TSX edits before react-best-practices injection |
-| `VERCEL_PLUGIN_TSX_EDIT_COUNT` | `0` | Current .tsx edit count |
-| `VERCEL_PLUGIN_LEXICAL_RESULT_MIN_SCORE` | `5.0` | Minimum score for lexical fallback |
-
----
-
-## Debugging Decision Tree
-
-Use this flowchart to diagnose common issues:
-
-```mermaid
-flowchart TD
- START["Skill not injecting?"] --> CHECK_MATCH{"Does the skill
match the trigger?"}
-
- CHECK_MATCH -->|"Not sure"| EXPLAIN["Run: bun run explain "]
- EXPLAIN --> CHECK_MATCH
-
- CHECK_MATCH -->|"No"| FIX_PATTERNS["Check pathPatterns/bashPatterns/
importPatterns in SKILL.md frontmatter"]
-
- CHECK_MATCH -->|"Yes"| CHECK_DEDUP{"Already seen
this session?"}
-
- CHECK_DEDUP -->|"Check"| INSPECT_DEDUP["ls /tmp/vercel-plugin-*-seen-skills.d/"]
- CHECK_DEDUP -->|"Yes"| CLEAR_DEDUP["Clear dedup state or
set VERCEL_PLUGIN_HOOK_DEDUP=off"]
-
- CHECK_DEDUP -->|"No"| CHECK_BUDGET{"Within budget?"}
-
- CHECK_BUDGET -->|"Check"| DEBUG_LOG["Set VERCEL_PLUGIN_LOG_LEVEL=debug
and look for decision:budget events"]
- CHECK_BUDGET -->|"Over budget"| INCREASE_BUDGET["Increase VERCEL_PLUGIN_INJECTION_BUDGET
or raise skill priority"]
-
- CHECK_BUDGET -->|"Within budget"| CHECK_PRIORITY{"Priority high enough?"}
-
- CHECK_PRIORITY -->|"Check"| EXPLAIN2["Run: bun run explain
Look at effective priority"]
- CHECK_PRIORITY -->|"Too low"| RAISE_PRIORITY["Increase metadata.priority
or add profiler detection"]
-
- CHECK_PRIORITY -->|"High enough"| DOCTOR["Run: bun run doctor
for manifest parity issues"]
-```
-
-**Quick debugging commands:**
-
-```bash
-# See why a skill matches (or doesn't) for a given file
-bun run explain app/api/route.ts
-
-# See why a skill matches for a bash command
-bun run explain "vercel deploy --prod"
-
-# Full diagnostic check
-bun run doctor
-
-# Enable debug logging for the next Claude Code session
-export VERCEL_PLUGIN_LOG_LEVEL=debug
-
-# Enable maximum trace logging
-export VERCEL_PLUGIN_LOG_LEVEL=trace
-```
-
----
-
-## See Also
-
-- [Operations & Debugging](./04-operations-debugging.md) — complete operations guide including CLI tools
-- [Reference](./05-reference.md) — full environment variable table and hook registry
-- [Glossary](./glossary.md) — definitions of project-specific terms
diff --git a/docs/skill-authoring.md b/docs/skill-authoring.md
deleted file mode 100644
index 33c0061..0000000
--- a/docs/skill-authoring.md
+++ /dev/null
@@ -1,846 +0,0 @@
-# Skill Authoring & Frontmatter Reference
-
-> **Audience**: Skill authors — anyone adding new skills or extending existing ones.
-
-This guide walks you through creating a new skill from scratch, explains every frontmatter field, documents the scoring engine, the validation system, the manifest build pipeline, the template include engine, and the custom YAML parser's non-standard behavior. It includes annotated real-world examples from the `skills/` directory.
-
----
-
-## Table of Contents
-
-1. [User Story: Adding a New Skill End-to-End](#user-story-adding-a-new-skill-end-to-end)
-2. [SKILL.md Frontmatter Schema](#skillmd-frontmatter-schema)
- - [Top-Level Fields](#top-level-fields)
- - [metadata Object](#metadata-object)
- - [promptSignals Object](#promptsignals-object)
- - [validate Array](#validate-array)
- - [retrieval Object](#retrieval-object)
-3. [Annotated Real Skill Examples](#annotated-real-skill-examples)
- - [Example 1: nextjs (Complex — Signals + Validation)](#example-1-nextjs)
- - [Example 2: email (Minimal — Patterns Only)](#example-2-email)
-4. [Pattern Matching Reference](#pattern-matching-reference)
- - [pathPatterns (Globs)](#pathpatterns-globs)
- - [bashPatterns (Regex)](#bashpatterns-regex)
- - [importPatterns (Package Matchers)](#importpatterns-package-matchers)
-5. [Prompt Signal Scoring](#prompt-signal-scoring)
-6. [Validation Rules](#validation-rules)
-7. [Manifest Build Pipeline](#manifest-build-pipeline)
-8. [Template Include Engine](#template-include-engine)
- - [Section Includes](#section-includes)
- - [Frontmatter Includes](#frontmatter-includes)
- - [Build Workflow](#build-workflow)
-9. [Custom YAML Parser Gotchas](#custom-yaml-parser-gotchas)
-10. [Build & Test Workflow](#build--test-workflow)
-
----
-
-## User Story: Adding a New Skill End-to-End
-
-> **Scenario**: You're a developer on the Vercel plugin team. Vercel just shipped a new feature — say, "Edge Config" — and you want Claude to automatically inject best-practice guidance whenever a developer touches Edge Config files, runs related commands, or asks about it in a prompt.
-
-### The journey
-
-```mermaid
-flowchart LR
- A["1. Create directory
skills/edge-config/"] --> B["2. Write SKILL.md
frontmatter + body"]
- B --> C["3. Build manifest
bun run build:manifest"]
- C --> D["4. Validate
bun run validate"]
- D --> E["5. Test with explain CLI
bun run explain -- --file edge-config.json"]
- E --> F["6. Run tests
bun test"]
- F --> G["7. Build all & commit
bun run build"]
-```
-
-### Step 1 — Create the skill directory
-
-```bash
-mkdir -p skills/edge-config
-```
-
-Skills are keyed by **directory name**, not the frontmatter `name` field. The directory name is the canonical identifier used everywhere: dedup, manifest, env vars, and logs.
-
-### Step 2 — Write `SKILL.md`
-
-Create `skills/edge-config/SKILL.md` with YAML frontmatter between `---` delimiters, followed by the guidance body in markdown. See the [Frontmatter Schema](#skillmd-frontmatter-schema) section for every available field.
-
-Minimal skeleton:
-
-```markdown
----
-name: edge-config
-description: "Best practices for Vercel Edge Config — a low-latency global data store"
-summary: "Edge Config: use read() not get(), prefer JSON values"
-metadata:
- priority: 6
- pathPatterns:
- - "edge-config.*"
- bashPatterns:
- - "\\bedge.config\\b"
- importPatterns:
- - "@vercel/edge-config"
- promptSignals:
- phrases:
- - "edge config"
- minScore: 6
-validate:
- - pattern: "edgeConfig\\.get\\("
- message: "Use edgeConfig.read() instead of .get() — read() returns typed values"
- severity: error
----
-
-# Edge Config
-
-You are an expert in Vercel Edge Config...
-```
-
-### Step 3 — Build the manifest
-
-```bash
-bun run build:manifest
-```
-
-This reads all `skills/*/SKILL.md`, extracts frontmatter, compiles glob→regex and import→regex at build time, and writes `generated/skill-manifest.json`. Hooks read the manifest at runtime for fast matching — they never parse SKILL.md live.
-
-### Step 4 — Validate
-
-```bash
-bun run validate
-```
-
-Checks that frontmatter parses, required fields are present, patterns are valid (globs compile, regexes parse), and the manifest is in sync.
-
-### Step 5 — Test with the explain CLI
-
-```bash
-# File path matching
-bun run scripts/explain.ts --file edge-config.json
-
-# Bash command matching
-bun run scripts/explain.ts --bash "vercel edge-config ls"
-
-# With profiler boost simulation
-bun run scripts/explain.ts --file edge-config.json --likely-skills edge-config
-```
-
-The explain command mirrors runtime logic exactly — it shows priority scores, match reasons, and whether your skill would be injected within the budget.
-
-### Step 6 — Run the full test suite
-
-```bash
-bun test
-```
-
-### Step 7 — Build everything and commit
-
-```bash
-bun run build # hooks + manifest + from-skills
-bun test # final verification
-```
-
-### What happens at runtime after you ship
-
-1. **SessionStart** — The profiler scans `package.json`. If `@vercel/edge-config` is a dependency, your skill gets a **+5 priority boost** via `VERCEL_PLUGIN_LIKELY_SKILLS`.
-2. **PreToolUse** — When Claude reads `edge-config.json` or runs `vercel env pull`, your `pathPatterns` and `bashPatterns` match → the skill is ranked, deduped, and injected within the 18KB budget (max 3 skills).
-3. **UserPromptSubmit** — When the developer types "how do I set up edge config?", your `promptSignals.phrases` score +6 → the skill injects within the 8KB budget (max 2 skills).
-4. **PostToolUse** — When Claude writes to a matched file, your `validate` rules run and flag antipatterns.
-
----
-
-## User Story: TSX Edit Trigger (react-best-practices)
-
-> **Scenario**: You're a developer building a React dashboard. You've been editing `.tsx` components for a while — adding hooks, state, and effects. After your 3rd `.tsx` edit, Claude suddenly has React best-practices guidance it didn't have before.
-
-### What's happening under the hood
-
-The PreToolUse hook tracks `.tsx` edits via `VERCEL_PLUGIN_TSX_EDIT_COUNT`. When the count hits the threshold (default 3, configurable via `VERCEL_PLUGIN_REVIEW_THRESHOLD`), the `react-best-practices` skill injects with a **+40 priority boost**.
-
-```mermaid
-sequenceDiagram
- participant Dev as Developer
- participant CC as Claude Code
- participant Hook as PreToolUse
-
- Dev->>CC: Edit Button.tsx (add onClick handler)
- CC->>Hook: Edit tool on Button.tsx
- Note over Hook: TSX count: 0 → 1
-
- Dev->>CC: Edit Dashboard.tsx (add useEffect)
- CC->>Hook: Edit tool on Dashboard.tsx
- Note over Hook: TSX count: 1 → 2
-
- Dev->>CC: Edit Sidebar.tsx (add useState)
- CC->>Hook: Edit tool on Sidebar.tsx
- Note over Hook: TSX count: 2 → 3 ← threshold!
- Note over Hook: Inject react-best-practices (+40 boost)
- Hook-->>CC: React guidance injected
-
- Note over CC: Claude now flags missing
"use client", suggests memoization,
checks hook dependencies
-
- Dev->>CC: Edit Header.tsx
- CC->>Hook: Edit tool on Header.tsx
- Note over Hook: TSX count: 0 → 1 (reset after injection)
-```
-
-### How to configure this in your skill
-
-The TSX review trigger is hard-coded to the `react-best-practices` skill — you can't create custom edit-count triggers for other skills. But understanding the pattern helps you design skills that complement it: if your skill is about a React library (e.g., `swr`, `shadcn`), its patterns will fire alongside `react-best-practices` when matching files are edited.
-
----
-
-## User Story: Dev Server Detection (agent-browser-verify)
-
-> **Scenario**: You've just finished building a login form and ask Claude to start the dev server. Claude not only starts `npm run dev`, but also opens a browser to visually verify the form renders correctly.
-
-### What's happening under the hood
-
-The PreToolUse hook detects dev server commands (`next dev`, `npm run dev`, `pnpm dev`, `bun dev`, `vite`, etc.) and injects `agent-browser-verify` with a **+45 priority boost** — but only if the `agent-browser` CLI is installed.
-
-```mermaid
-flowchart TD
- BASH["Bash tool: npm run dev"] --> DETECT{"Matches dev
server pattern?"}
- DETECT -->|No| NORMAL["Normal pattern matching"]
- DETECT -->|Yes| AVAIL{"agent-browser
CLI installed?"}
- AVAIL -->|No| NOTICE["Inject unavailability notice
(suggest installation)"]
- AVAIL -->|Yes| GUARD{"Loop guard:
count < 2?"}
- GUARD -->|No| SKIP["Skip (prevent infinite loops)"]
- GUARD -->|Yes| INJECT["Inject agent-browser-verify (+45)
+ verification companion (summary)"]
-```
-
-### Key design details
-
-- The session-start profiler sets `VERCEL_PLUGIN_AGENT_BROWSER_AVAILABLE=1` if the CLI is found on PATH
-- A **loop guard** caps injection at 2 per session (`VERCEL_PLUGIN_DEV_VERIFY_COUNT`) — repeated `npm run dev` restarts won't flood context
-- The `verification` skill is co-injected as a **summary-only companion** to provide a verification checklist without consuming too much budget
-
----
-
-## User Story: Prompt Signal Matching (UserPromptSubmit)
-
-> **Scenario**: A developer types "how do I add a durable workflow that survives crashes?" into Claude Code. Without touching any files, Claude gets the `workflow` skill injected because the prompt signals match.
-
-### What's happening under the hood
-
-The UserPromptSubmit hook normalizes the prompt and scores it against every skill's `promptSignals`:
-
-**Normalized prompt**: `"how do i add a durable workflow that survive crash"`
-(lowercase → contraction expansion → stemming: "survives" → "survive", "crashes" → "crash")
-
-**Scoring for `workflow` skill**:
-
-```yaml
-# workflow's promptSignals:
-phrases: ["durable workflow"] # substring match → +6
-allOf: [["workflow", "durable"]] # both present → +4
-anyOf: ["durable", "reliable"] # "durable" +1 → +1
-minScore: 4
-```
-
-| Component | Match | Score |
-|-----------|-------|-------|
-| phrase: `"durable workflow"` | Yes | +6 |
-| allOf: `["workflow", "durable"]` | Both present | +4 |
-| anyOf: `"durable"` | Yes | +1 |
-| **Total** | | **11** |
-
-11 >= minScore 4 → **skill injects** within the 8KB / 2-skill budget.
-
-### How to write effective prompt signals for your skill
-
-1. **Start with phrases** — these are the strongest signals (+6 each). Use the exact phrases your users type: `"edge config"`, `"ai sdk"`, `"deploy to vercel"`.
-
-2. **Add allOf groups for concepts** — combinations of terms that together indicate intent: `[["cron", "schedule"]]`, `[["streaming", "response"]]`. Each fully-matched group scores +4.
-
-3. **Sprinkle anyOf for weak signals** — individual terms that slightly boost relevance: `"timeout"`, `"cache"`, `"optimize"`. Capped at +2 total to prevent noise.
-
-4. **Use noneOf to suppress false positives** — terms that indicate the user is asking about something else: `["github actions", ".github/workflows"]` in the workflow skill prevents matching GitHub CI prompts.
-
-5. **Set minScore appropriately** — default 6 (one phrase match). Lower to 4 for broad skills like `investigation-mode` that should match on weaker signals.
-
----
-
-## SKILL.md Frontmatter Schema
-
-Every `SKILL.md` begins with a YAML frontmatter block between `---` delimiters. Below is the complete schema with types, defaults, and descriptions.
-
-### Top-Level Fields
-
-| Field | Type | Required | Default | Description |
-|-------|------|----------|---------|-------------|
-| `name` | `string` | No | directory name | Human-readable name. Falls back to the directory name if omitted. |
-| `description` | `string` | **Yes** | — | One-line description of the skill's purpose. Used in manifest, logs, and lexical fallback scoring. |
-| `summary` | `string` | Recommended | — | Brief fallback text injected when the full body exceeds the byte budget. Keep under ~200 chars. |
-| `metadata` | `object` | **Yes** | — | Contains all matching, scoring, and validation configuration. |
-| `validate` | `object[]` | No | `[]` | PostToolUse validation rules (can also live inside `metadata`). |
-| `retrieval` | `object` | No | — | Discovery metadata for search/retrieval systems. |
-
-### metadata Object
-
-| Field | Type | Default | Description |
-|-------|------|---------|-------------|
-| `priority` | `number` | `5` | Injection priority (range 4–8). Higher = injected first when multiple skills match. |
-| `pathPatterns` | `string[]` | `[]` | Glob patterns for file path matching. Compiled to regex at build time. |
-| `bashPatterns` | `string[]` | `[]` | JavaScript regex patterns for bash command matching. |
-| `importPatterns` | `string[]` | `[]` | Package name patterns for import/require matching. Supports `*` for scoped wildcards. |
-| `promptSignals` | `object` | — | Prompt-based scoring configuration (see below). |
-
-### promptSignals Object
-
-Controls how the **UserPromptSubmit** hook scores user prompts against this skill.
-
-| Field | Type | Default | Description |
-|-------|------|---------|-------------|
-| `phrases` | `string[]` | `[]` | Exact substring matches (case-insensitive). Each hit scores **+6**. |
-| `allOf` | `string[][]` | `[]` | Groups of terms that must **all** appear in the prompt. Each fully-matched group scores **+4**. |
-| `anyOf` | `string[]` | `[]` | Optional terms. Each hit scores **+1**, capped at **+2 total**. |
-| `noneOf` | `string[]` | `[]` | Suppression terms. **Any match sets the score to -Infinity** (hard suppress — skill never injects). |
-| `minScore` | `number` | `6` | Minimum total score required for the skill to be injected. |
-
-### validate Array
-
-Each entry defines a PostToolUse validation rule that runs when Claude writes or edits a file matched by the skill's path patterns.
-
-| Field | Type | Required | Default | Description |
-|-------|------|----------|---------|-------------|
-| `pattern` | `string` | **Yes** | — | Regex pattern to search for in the written file content. |
-| `message` | `string` | **Yes** | — | Error/warning message shown to Claude when pattern matches. Should be **actionable** (tell Claude what to do, not just what's wrong). |
-| `severity` | `"error"` or `"warn"` | **Yes** | — | `error` = Claude must fix before proceeding. `warn` = advisory. |
-| `skipIfFileContains` | `string` | No | — | Regex — if the file also matches this pattern, skip the rule entirely. Prevents false positives. |
-
-### retrieval Object
-
-Optional metadata for discovery and search systems.
-
-| Field | Type | Description |
-|-------|------|-------------|
-| `aliases` | `string[]` | Alternative names users might search for (e.g., `["vercel ai", "ai library"]`). |
-| `intents` | `string[]` | User intents this skill addresses (e.g., `["add ai to app", "set up streaming"]`). |
-| `entities` | `string[]` | Key API symbols, functions, or types (e.g., `["useChat", "streamText"]`). |
-
----
-
-## Annotated Real Skill Examples
-
-### Example 1: nextjs
-
-**File**: `skills/nextjs/SKILL.md` — a complex skill with prompt signals, 11 validation rules, and broad pattern coverage.
-
-```yaml
----
-name: nextjs
-description: Next.js App Router expert guidance. Use when building, debugging,
- or architecting Next.js applications — routing, Server Components, Server
- Actions, Cache Components, layouts, middleware/proxy, data fetching,
- rendering strategies, and deployment on Vercel.
-metadata:
- priority: 5 # ← Default priority; not boosted
- # because Next.js is so common the
- # profiler adds +5 when detected.
- pathPatterns:
- - 'next.config.*' # ← Matches next.config.js, .mjs, .ts
- - 'next-env.d.ts'
- - 'app/**' # ← Catches all App Router files
- - 'pages/**' # ← Also catches Pages Router (for migration guidance)
- - 'src/app/**' # ← Common src/ layout variant
- - 'src/pages/**'
- - 'tailwind.config.*' # ← Tailwind is tightly coupled with Next.js projects
- - 'postcss.config.*'
- - 'tsconfig.json'
- - 'tsconfig.*.json'
- - 'apps/*/app/**' # ← Monorepo patterns (Turborepo convention)
- - 'apps/*/pages/**'
- - 'apps/*/src/app/**'
- - 'apps/*/src/pages/**'
- - 'apps/*/next.config.*'
-
- bashPatterns:
- - '\bnext\s+(dev|build|start|lint)\b' # ← Core Next.js CLI commands
- - '\bnext\s+experimental-analyze\b' # ← Bundle analyzer
- - '\bnpx\s+create-next-app\b' # ← Project scaffolding
- - '\bbunx\s+create-next-app\b'
- - '\bnpm\s+run\s+(dev|build|start)\b' # ← npm scripts that likely invoke Next.js
- - '\bpnpm\s+(dev|build)\b'
- - '\bbun\s+run\s+(dev|build)\b'
-
- promptSignals:
- phrases: # ← Each phrase hit = +6
- - "next.js"
- - "nextjs"
- - "app router"
- - "server component"
- - "server action"
- allOf: # ← All terms in group must match = +4
- - [middleware, next] # "next middleware" → +4
- - [layout, route] # "route layout" → +4
- anyOf: # ← Each hit = +1, capped at +2
- - "pages router"
- - "getserversideprops"
- - "use server"
- noneOf: [] # ← No suppression terms
- minScore: 6 # ← One phrase match is enough
-
-validate: # ← 11 rules catching common mistakes
- - pattern: export.*getServerSideProps
- message: 'getServerSideProps is removed in App Router — use server
- components or route handlers'
- severity: error # ← error = Claude must fix
-
- - pattern: (useState|useEffect)
- message: 'React hooks require "use client" directive — add it at the
- top of client components'
- severity: warn
- skipIfFileContains: "^['\"]use client['\"]"
- # ↑ Skip if file already has "use client"
-
- - pattern: useRef\(\s*\)
- message: 'useRef() requires an initial value in React 19 — use useRef(null)'
- severity: error
-
- - pattern: (? NORM["Normalize:
lowercase, expand contractions,
collapse whitespace"]
- NORM --> NONE{"noneOf
matches?"}
- NONE -->|"Yes"| SUPPRESS["-∞ → hard suppress"]
- NONE -->|"No"| PHRASES["phrases: +6 each"]
- PHRASES --> ALLOF["allOf: +4 per complete group"]
- ALLOF --> ANYOF["anyOf: +1 each, max +2"]
- ANYOF --> LEXICAL["Lexical fallback:
+2 if skill terms overlap"]
- LEXICAL --> TOTAL{"total ≥ minScore?"}
- TOTAL -->|"Yes"| INJECT["Inject skill
(up to 2 skills, 8KB budget)"]
- TOTAL -->|"No"| SKIP["Skip"]
-```
-
-### Scoring walkthrough
-
-Given a prompt: *"I want to add the ai sdk for streaming"*
-
-And a skill with:
-```yaml
-promptSignals:
- phrases: ["ai sdk"] # "ai sdk" is a substring → +6
- allOf: [["streaming", "generation"]] # Only "streaming" matched, not "generation" → +0
- anyOf: ["streaming"] # "streaming" present → +1
- minScore: 6
-```
-
-| Component | Matches | Score |
-|-----------|---------|-------|
-| phrases: "ai sdk" | Yes (substring) | +6 |
-| allOf: ["streaming", "generation"] | Partial (1 of 2) | +0 |
-| anyOf: "streaming" | Yes | +1 |
-| **Total** | | **7** |
-
-7 ≥ minScore 6 → **skill injects**.
-
-### Lexical fallback scoring
-
-When phrase/allOf/anyOf scoring yields a low result, the system tokenizes both the prompt and skill metadata (description, phrases, aliases, entities) and checks for significant term overlap. This adds up to **+2** and catches prompts that are topically relevant but don't match exact phrases.
-
----
-
-## Validation Rules
-
-Validation rules run in the **PostToolUse** hook after Claude writes or edits a file. The hook:
-
-1. Matches the written file path against all skills' `pathPatterns`
-2. For each matched skill, runs its `validate` rules against the file content
-3. Returns fix instructions as `additionalContext` for any violations
-
-```mermaid
-flowchart TD
- WRITE["Claude writes/edits file"] --> MATCH["Match file path → skills"]
- MATCH --> LOOP["For each matched skill"]
- LOOP --> RULE["For each validate rule"]
- RULE --> SKIP{"skipIfFileContains
matches?"}
- SKIP -->|"Yes"| NEXT["Skip rule"]
- SKIP -->|"No"| TEST{"pattern
matches file?"}
- TEST -->|"Yes"| REPORT["Report: severity + message"]
- TEST -->|"No"| NEXT
- NEXT --> RULE
-```
-
-### Best practices for validation rules
-
-- Use `severity: error` sparingly — only for patterns that **will** break functionality
-- Use `severity: warn` for style preferences or potential issues
-- Use `skipIfFileContains` to avoid false positives (e.g., skip "needs use client" if file already has it)
-- Keep `message` **actionable** — tell Claude what to do, not just what's wrong
-- Remember patterns are **regex**: escape special characters (`.` → `\\.`, `(` → `\\(`)
-
-### Real example: Next.js validation
-
-```yaml
-validate:
- # Error: will definitely break at runtime
- - pattern: (?(43 skills)"] -->|"build-manifest.ts"| MANIFEST["generated/skill-manifest.json"]
- MANIFEST -->|"imported by"| HOOKS["Runtime hooks
(pretooluse, user-prompt-submit)"]
-```
-
-### Pipeline: `SKILL.md` → `build-manifest.ts` → `skill-manifest.json`
-
-**Input**: All `skills/*/SKILL.md` files.
-
-**Processing** (`scripts/build-manifest.ts`):
-
-1. **Parse frontmatter** — extracts YAML between `---` delimiters using the custom parser
-2. **Compile pathPatterns** — each glob is converted to a regex via `globToRegex()`. Invalid globs are dropped (with warnings).
-3. **Compile bashPatterns** — each string is validated as a `RegExp`. Invalid patterns are dropped.
-4. **Compile importPatterns** — each pattern is converted via `importPatternToRegex()`, producing `{ source, flags }` pairs.
-5. **Paired arrays** — the manifest stores patterns and their compiled regex sources in parallel arrays with matching indices. If a pattern fails to compile, **both** the pattern and its regex slot are dropped to keep indices aligned.
-
-**Output** (`generated/skill-manifest.json`, version 2):
-
-```json
-{
- "generatedAt": "2026-03-10T...",
- "version": 2,
- "skills": {
- "nextjs": {
- "priority": 5,
- "summary": null,
- "pathPatterns": ["next.config.*", "app/**", ...],
- "pathRegexSources": ["^next\\.config\\.[^/]*$", ...],
- "bashPatterns": ["\\bnext\\s+(dev|build|start)\\b", ...],
- "bashRegexSources": ["\\bnext\\s+(dev|build|start)\\b", ...],
- "importPatterns": [],
- "importRegexSources": [],
- "bodyPath": "skills/nextjs/SKILL.md",
- "validate": [...],
- "promptSignals": { "phrases": [...], ... }
- }
- }
-}
-```
-
-### Build command
-
-```bash
-bun run build:manifest
-```
-
-This is also included in `bun run build` (which runs hooks + manifest + from-skills).
-
-### Keeping the manifest in sync
-
-- Run `bun run validate` to check manifest parity
-- Run `bun run doctor` for a comprehensive health check including manifest drift detection
-- The pre-commit hook does **not** auto-rebuild the manifest (only hooks are auto-compiled)
-
----
-
-## Template Include Engine
-
-Skills are the single source of truth for domain knowledge. Agents and commands pull content from skills at build time via `.md.tmpl` templates, so they stay in sync without duplicating prose.
-
-### Section Includes
-
-Extract a markdown section by heading:
-
-```
-{{include:skill::}}
-```
-
-**Behavior**: Finds the heading (case-insensitive) in the skill's markdown body and extracts everything from that heading to the next heading of **equal or higher level**. Code blocks are skipped during heading detection.
-
-**Example**: Given `skills/nextjs/SKILL.md` contains:
-
-```markdown
-## App Router
-
-Use the App Router for all new projects...
-
-### File Conventions
-
-- `page.tsx` — route entry point
-- `layout.tsx` — shared layout
-
-## Pages Router
-
-Legacy approach...
-```
-
-Then `{{include:skill:nextjs:App Router}}` extracts:
-
-```markdown
-## App Router
-
-Use the App Router for all new projects...
-
-### File Conventions
-
-- `page.tsx` — route entry point
-- `layout.tsx` — shared layout
-```
-
-It stops at `## Pages Router` because that's an equal-level heading.
-
-**Nested headings** are supported with `>` separator: `{{include:skill:env-vars:vercel env CLI > List Environment Variables}}` extracts a subsection under a parent heading.
-
-### Frontmatter Includes
-
-Extract a frontmatter field value:
-
-```
-{{include:skill::frontmatter:}}
-```
-
-Supports dotted paths for nested fields:
-
-```
-{{include:skill:nextjs:frontmatter:metadata.priority}} → "5"
-{{include:skill:email:frontmatter:description}} → "Email sending integration guidance..."
-```
-
-### Build Workflow
-
-```bash
-# Compile all .md.tmpl templates → .md files
-bun run build:from-skills
-
-# Check if generated .md files are up-to-date (CI mode, exits non-zero on drift)
-bun run build:from-skills:check
-```
-
-**Current templates** (8 files):
-
-| Template | Output |
-|----------|--------|
-| `agents/ai-architect.md.tmpl` | `agents/ai-architect.md` |
-| `agents/deployment-expert.md.tmpl` | `agents/deployment-expert.md` |
-| `agents/performance-optimizer.md.tmpl` | `agents/performance-optimizer.md` |
-| `commands/bootstrap.md.tmpl` | `commands/bootstrap.md` |
-| `commands/deploy.md.tmpl` | `commands/deploy.md` |
-| `commands/env.md.tmpl` | `commands/env.md` |
-| `commands/marketplace.md.tmpl` | `commands/marketplace.md` |
-| `commands/status.md.tmpl` | `commands/status.md` |
-
-**Diagnostic codes** (reported when includes fail):
-
-| Code | Meaning |
-|------|---------|
-| `SKILL_NOT_FOUND` | No `skills//SKILL.md` exists |
-| `HEADING_NOT_FOUND` | Heading doesn't exist in the skill body |
-| `FRONTMATTER_NOT_FOUND` | Field path doesn't exist in YAML |
-| `STALE_OUTPUT` | Generated `.md` is out of date |
-
-**Dependency tracking**: `generated/build-from-skills.manifest.json` records which templates depend on which skills, enabling incremental builds and CI staleness checks.
-
----
-
-## Custom YAML Parser Gotchas
-
-The plugin uses a custom inline YAML parser (`parseSimpleYaml` in `skill-map-frontmatter.mjs`), **not** the standard `js-yaml` library. This parser has intentional non-standard behavior:
-
-| Input | Standard YAML | This Parser |
-|-------|---------------|-------------|
-| `null` (bare) | JavaScript `null` | String `"null"` |
-| `true` (bare) | Boolean `true` | String `"true"` |
-| `false` (bare) | Boolean `false` | String `"false"` |
-| `[unclosed` | Parse error | Scalar string `"[unclosed"` |
-| Tab indent | Usually accepted | **Parse error** |
-
-### Key implications
-
-1. **No JavaScript nulls** — you never get `null` from frontmatter; everything is a string or number.
-2. **No booleans** — don't rely on boolean coercion; build scripts handle this explicitly.
-3. **Always close brackets** — a missing `]` silently turns your array into a useless string.
-4. **Spaces only** — the parser deliberately rejects tabs to avoid ambiguous indentation.
-
----
-
-## Build & Test Workflow
-
-After creating or modifying a skill:
-
-```bash
-# 1. Build manifest (compiles frontmatter → JSON)
-bun run build:manifest
-
-# 2. Validate all skills
-bun run validate
-
-# 3. Test with explain CLI
-bun run scripts/explain.ts --file
-
-# 4. Run full test suite
-bun test
-
-# 5. Build everything (hooks + manifest + templates)
-bun run build
-
-# 6. Run doctor to check for issues
-bun run doctor
-```
-
-The pre-commit hook automatically runs `build:hooks` when `.mts` files are staged, but you should manually run `build:manifest` when changing frontmatter.
diff --git a/docs/skill-injection.md b/docs/skill-injection.md
deleted file mode 100644
index f813a3b..0000000
--- a/docs/skill-injection.md
+++ /dev/null
@@ -1,766 +0,0 @@
-# Skill Injection Engine
-
-> **Audience**: Plugin developers, skill authors, and anyone debugging why a skill did or didn't inject.
-
-This document explains the complete skill injection pipeline — how vercel-plugin decides which skills to surface, when, and why. It covers both injection hooks (PreToolUse and UserPromptSubmit), the ranking system with all boost factors, the dedup state machine, budget enforcement, prompt signal scoring, and special-case triggers.
-
----
-
-## Table of Contents
-
-1. [How Injection Works (Overview)](#how-injection-works-overview)
-2. [PreToolUse Pipeline](#pretooluse-pipeline)
- - [Stage 1: Parse Input](#stage-1-parse-input)
- - [Stage 2: Load Skills](#stage-2-load-skills)
- - [Stage 3: Match Skills](#stage-3-match-skills)
- - [Stage 4: Rank & Deduplicate](#stage-4-rank--deduplicate)
- - [Stage 5: Inject with Budget Enforcement](#stage-5-inject-with-budget-enforcement)
- - [Stage 6: Format Output](#stage-6-format-output)
-3. [UserPromptSubmit Pipeline](#userpromptsubmit-pipeline)
-4. [Prompt Signal Scoring](#prompt-signal-scoring)
- - [Normalization](#normalization)
- - [Scoring Weights](#scoring-weights)
- - [Scoring Walkthrough](#scoring-walkthrough)
- - [Lexical Fallback](#lexical-fallback)
- - [Troubleshooting Intent Classification](#troubleshooting-intent-classification)
-5. [Ranking & Boost Factors](#ranking--boost-factors)
-6. [Dedup State Machine](#dedup-state-machine)
-7. [Budget Enforcement](#budget-enforcement)
-8. [Special-Case Triggers](#special-case-triggers)
- - [TSX Review Trigger](#tsx-review-trigger)
- - [Dev Server Detection](#dev-server-detection)
- - [Vercel Env Help](#vercel-env-help)
- - [Investigation Companion Selection](#investigation-companion-selection)
-9. [User Stories](#user-stories)
- - [User Story 1: TSX Edit Trigger](#user-story-1-tsx-edit-trigger)
- - [User Story 2: Dev Server Detection](#user-story-2-dev-server-detection)
- - [User Story 3: Prompt Signal Matching](#user-story-3-prompt-signal-matching)
-10. [PostToolUse Validation](#posttooluse-validation)
-11. [Environment Variables Reference](#environment-variables-reference)
-
----
-
-## How Injection Works (Overview)
-
-The plugin watches what Claude is doing and injects only the skills relevant to the current action. There are two independent injection paths:
-
-| Hook | Trigger | Budget | Max Skills | Match Method |
-|------|---------|--------|------------|--------------|
-| **PreToolUse** | Claude calls Read, Edit, Write, or Bash | 18 KB | 5 | File path globs, bash regex, import patterns |
-| **UserPromptSubmit** | User types a prompt | 8 KB | 2 | Prompt signal scoring (phrases/allOf/anyOf/noneOf) |
-
-Both hooks share the same dedup system — once a skill is injected, it won't be injected again in the same session.
-
-```mermaid
-flowchart LR
- subgraph "Claude Code Session"
- A["User types prompt"] --> B{"UserPromptSubmit
hook"}
- C["Claude calls tool"] --> D{"PreToolUse
hook"}
- end
-
- B --> E["Prompt Signal
Scoring"]
- D --> F["Pattern
Matching"]
-
- E --> G["Rank → Dedup → Budget"]
- F --> G
-
- G --> H["additionalContext
injection"]
- H --> I["Claude sees
skill content"]
-```
-
----
-
-## PreToolUse Pipeline
-
-The PreToolUse hook fires every time Claude calls Read, Edit, Write, or Bash. It runs a six-stage pipeline:
-
-```mermaid
-flowchart TD
- INPUT["stdin JSON
(tool_name, tool_input, session_id)"]
- PARSE["Stage 1: parseInput
Extract toolName, toolTarget, scopeId"]
- LOAD["Stage 2: loadSkills
Manifest (fast) or live scan (fallback)"]
- MATCH["Stage 3: matchSkills
Path globs / bash regex / import patterns"]
- SPECIAL["Stage 3.5: Special Triggers
TSX review (+40) · Dev server (+45) · Env help"]
- RANK["Stage 4: Rank & Deduplicate
vercel.json (±10) · Profiler (+5) · Setup (+50)"]
- INJECT["Stage 5: injectSkills
Budget: 18KB · Ceiling: 3 skills · Summary fallback"]
- FORMAT["Stage 6: formatOutput
JSON with additionalContext"]
-
- INPUT --> PARSE --> LOAD --> MATCH --> SPECIAL --> RANK --> INJECT --> FORMAT
-
- style PARSE fill:#e1f5fe
- style LOAD fill:#e8f5e9
- style MATCH fill:#fff3e0
- style SPECIAL fill:#fce4ec
- style RANK fill:#f3e5f5
- style INJECT fill:#fff9c4
- style FORMAT fill:#e0f2f1
-```
-
-### Stage 1: Parse Input
-
-**Source**: `pretooluse-skill-inject.mts:parseInput()`
-
-Reads JSON from stdin and extracts:
-
-| Field | Description |
-|-------|-------------|
-| `toolName` | One of `Read`, `Edit`, `Write`, `Bash` |
-| `toolInput` | The tool's arguments (e.g., `file_path`, `command`) |
-| `sessionId` | Used for file-based dedup |
-| `toolTarget` | Primary target — file path for file tools, command string for Bash |
-| `scopeId` | Agent ID for subagent-scoped dedup (undefined for main agent) |
-
-Unsupported tools (anything not in `["Read", "Edit", "Write", "Bash"]`) are rejected with an empty `{}` response.
-
-### Stage 2: Load Skills
-
-**Source**: `pretooluse-skill-inject.mts:loadSkills()`
-
-Two-tier loading strategy:
-
-1. **Manifest** (`generated/skill-manifest.json`): Pre-built JSON with pre-compiled regex. Version 2 includes paired arrays (`pathPatterns` ↔ `pathRegexSources`) so hooks reconstruct `RegExp` objects directly — no glob compilation needed.
-
-2. **Live scan** (fallback): Scans `skills/*/SKILL.md`, parses YAML frontmatter via `buildSkillMap()`, validates with `validateSkillMap()`, and compiles patterns at runtime.
-
-The manifest path is always preferred (faster: no filesystem scan, no YAML parsing, no glob compilation).
-
-### Stage 3: Match Skills
-
-**Source**: `pretooluse-skill-inject.mts:matchSkills()`
-
-**For file tools** (Read/Edit/Write):
-
-```mermaid
-flowchart TD
- FILE["file_path from tool input"] --> FULL{"Full path
matches glob?"}
- FULL -->|Yes| HIT["Match found
(type: full)"]
- FULL -->|No| BASE{"Basename
matches glob?"}
- BASE -->|Yes| HIT2["Match found
(type: basename)"]
- BASE -->|No| SUFFIX{"Suffix
matches glob?"}
- SUFFIX -->|Yes| HIT3["Match found
(type: suffix)"]
- SUFFIX -->|No| IMPORT{"File content
matches importPatterns?"}
- IMPORT -->|Yes| HIT4["Match found
(type: import)"]
- IMPORT -->|No| MISS["No match"]
-```
-
-**For Bash**: Match command string against each skill's compiled `bashPatterns` regex.
-
-Each match produces a `MatchReason` with the winning pattern and match type.
-
-### Stage 4: Rank & Deduplicate
-
-**Source**: `pretooluse-skill-inject.mts:deduplicateSkills()`
-
-Priority adjustments are applied in layers. See [Ranking & Boost Factors](#ranking--boost-factors) for the complete boost table.
-
-Steps:
-1. **Filter already-seen** — remove skills present in the merged dedup state
-2. **vercel.json routing** — if target is `vercel.json`, read keys, adjust priorities (±10)
-3. **Profiler boost** — skills in `VERCEL_PLUGIN_LIKELY_SKILLS` get +5
-4. **Setup-mode routing** — `bootstrap` gets +50 in greenfield projects
-5. **Rank** — sort by `effectivePriority` DESC, then skill name ASC
-6. **Cap** — take the top N skills (default 5)
-
-### Stage 5: Inject with Budget Enforcement
-
-**Source**: `pretooluse-skill-inject.mts:injectSkills()`
-
-For each ranked skill in priority order:
-
-1. Check hard ceiling (max 3 skills) — drop with `cap_exceeded`
-2. Read `skills//SKILL.md`, strip frontmatter, keep body
-3. Wrap in comment markers: `...`
-4. Check byte budget — **first skill always gets full body**; subsequent must fit remaining budget
-5. If over budget, try `summary` fallback (see [Budget Enforcement](#budget-enforcement))
-6. Atomically claim skill in dedup system via `tryClaimSessionKey()` (O_EXCL)
-
-### Stage 6: Format Output
-
-Assembles final JSON:
-
-```json
-{
- "hookSpecificOutput": {
- "additionalContext": "\n...body...\n\n"
- }
-}
-```
-
-The metadata comment includes matched skills, injected skills, match reasons, boost factors, and budget usage — useful for debugging.
-
----
-
-## UserPromptSubmit Pipeline
-
-The UserPromptSubmit hook fires when the user types a prompt, before any tool calls. It uses **prompt signal scoring** instead of pattern matching.
-
-**Source**: `user-prompt-submit-skill-inject.mts`
-
-```mermaid
-flowchart TD
- PROMPT["User prompt text"] --> LEN{"Length >= 10
characters?"}
- LEN -->|No| SKIP["Skip (too short)"]
- LEN -->|Yes| NORM["Normalize prompt
(lowercase, contractions, stem, whitespace)"]
- NORM --> SCORE["Score every skill's
promptSignals against prompt"]
- SCORE --> INTENT["Classify troubleshooting
intent (if any)"]
- INTENT --> COMPANION["Select investigation
companion (if needed)"]
- COMPANION --> DEDUP["Filter seen skills"]
- DEDUP --> BUDGET["Budget enforcement
(8KB / 2 skills)"]
- BUDGET --> OUTPUT["Format JSON output
with additionalContext"]
-```
-
-Key differences from PreToolUse:
-
-| Parameter | PreToolUse | UserPromptSubmit |
-|-----------|------------|------------------|
-| Budget | 18 KB | 8 KB |
-| Max skills | 5 | 2 |
-| Match method | File/bash/import patterns | Prompt signal scoring |
-| Min input | — | 10 characters |
-
----
-
-## Prompt Signal Scoring
-
-**Source**: `hooks/src/prompt-patterns.mts`
-
-Each skill can define `promptSignals` in its frontmatter to declare which user prompts should trigger injection.
-
-### Normalization
-
-Both the user's prompt and the signal terms undergo normalization before scoring:
-
-1. **Lowercase** — `"Deploy to Vercel"` → `"deploy to vercel"`
-2. **Contraction expansion** — `"it's"` → `"it is"`, `"don't"` → `"do not"`, `"can't"` → `"cannot"`
-3. **Stemming** — `"deploying"` → `"deploy"`, `"configured"` → `"configur"`, `"building"` → `"build"`
-4. **Whitespace collapse** — multiple spaces/newlines → single space
-
-Skill authors don't need to account for contractions or verb tenses in signal definitions.
-
-### Scoring Weights
-
-```mermaid
-flowchart TD
- N["Normalized prompt"] --> NONE{"noneOf
matches?"}
- NONE -->|"Any term found"| SUPPRESS["score = -Infinity
HARD SUPPRESS"]
- NONE -->|"No match"| PHRASES
-
- PHRASES["phrases check
+6 per exact substring hit"] --> ALLOF
- ALLOF["allOf check
+4 per group where ALL terms match"] --> ANYOF
- ANYOF["anyOf check
+1 per hit, capped at +2 total"] --> TOTAL
-
- TOTAL{"total >= minScore?"}
- TOTAL -->|"Yes"| MATCHED["MATCHED — inject skill"]
- TOTAL -->|"No"| LEXICAL["Try lexical fallback"]
-
- style SUPPRESS fill:#ffcdd2
- style MATCHED fill:#c8e6c9
-```
-
-| Signal Type | Weight | Behavior |
-|-------------|--------|----------|
-| `phrases` | **+6** per hit | Exact substring match (case-insensitive, word-boundary aware) |
-| `allOf` | **+4** per group | All terms in the group must appear in the prompt |
-| `anyOf` | **+1** per hit, **capped at +2** | Prevents low-signal flooding |
-| `noneOf` | **-Infinity** | Hard suppress — skill is permanently excluded for this prompt |
-| `minScore` | threshold (default **6**) | Score must meet or exceed to qualify |
-
-### Scoring Walkthrough
-
-**Prompt**: *"I want to add the ai sdk for streaming"*
-
-**Skill signals**:
-```yaml
-promptSignals:
- phrases: ["ai sdk"] # "ai sdk" is a substring → +6
- allOf: [["streaming", "generation"]] # Only "streaming" matched → +0
- anyOf: ["streaming"] # "streaming" present → +1
- minScore: 6
-```
-
-| Component | Matches? | Score |
-|-----------|----------|-------|
-| phrases: `"ai sdk"` | Yes (substring) | +6 |
-| allOf: `["streaming", "generation"]` | Partial (1 of 2) | +0 |
-| anyOf: `"streaming"` | Yes | +1 |
-| **Total** | | **7** |
-
-7 >= minScore 6 → **skill injects**.
-
-**Another example** — reaching threshold via allOf + anyOf only:
-
-```yaml
-promptSignals:
- allOf: [["cron", "schedule"]] # +4
- anyOf: ["vercel", "deploy", "prod"] # +1 each, capped at +2
- minScore: 6
-```
-
-Prompt: *"I need to schedule a cron job on vercel for production"*
-- allOf `["cron", "schedule"]` both present → +4
-- anyOf `"vercel"` +1, `"prod"` (stemmed from "production") +1 → +2 (cap reached)
-- Total: 6 >= 6 → **matched**
-
-### Lexical Fallback
-
-**Source**: `prompt-patterns.mts:scorePromptWithLexical()`
-
-When exact scoring doesn't reach the threshold, a **TF-IDF lexical index** provides a fallback. The lexical score is boosted by 1.35x and compared against the exact score. The higher wins.
-
-This catches prompts that are topically relevant but don't exactly hit configured phrases.
-
-### Troubleshooting Intent Classification
-
-**Source**: `user-prompt-submit-skill-inject.mts:classifyTroubleshootingIntent()`
-
-A regex classifier detects three troubleshooting intents:
-
-| Intent | Pattern Examples | Routed Skills |
-|--------|-----------------|---------------|
-| `flow-verification` | "loads but", "submits but", "works locally but" | `verification` |
-| `stuck-investigation` | "stuck", "frozen", "timed out", "not responding" | `investigation-mode` |
-| `browser-only` | "blank page", "white screen", "console errors" | `agent-browser-verify` + `investigation-mode` |
-
-**Suppression**: Test framework mentions (`jest`, `vitest`, `playwright test`) suppress all verification-family skills to avoid injecting browser guidance during unit testing.
-
----
-
-## Ranking & Boost Factors
-
-Every matched skill goes through a ranking pipeline that applies priority adjustments in layers:
-
-```mermaid
-flowchart LR
- BASE["Base Priority
SKILL.md frontmatter
Range 4–8, default 5"]
- VJ["vercel.json Routing
Relevant key: +10
Irrelevant: −10"]
- PROF["Profiler Boost
Likely skill: +5"]
- SETUP["Setup Mode
bootstrap: +50"]
- TSX["TSX Review
react-best-practices: +40"]
- DEV["Dev Server
agent-browser-verify: +45"]
-
- BASE --> VJ --> PROF --> SETUP
- SETUP --> RANK["rankEntries()
effectivePriority DESC
Tiebreak: name ASC"]
- TSX -.-> RANK
- DEV -.-> RANK
-
- style BASE fill:#e1f5fe
- style VJ fill:#fff3e0
- style PROF fill:#e8f5e9
- style SETUP fill:#fce4ec
- style TSX fill:#f3e5f5
- style DEV fill:#ede7f6
-```
-
-### Complete Boost Factor Table
-
-| Boost | Value | Condition | Target Skill | Source |
-|-------|-------|-----------|--------------|--------|
-| **Base priority** | 4–8 | Always applied | All skills | `SKILL.md` frontmatter |
-| **Profiler** | **+5** | Skill is in `VERCEL_PLUGIN_LIKELY_SKILLS` | Any detected skill | `session-start-profiler.mts` |
-| **vercel.json relevant** | **+10** | Tool target is `vercel.json` and file keys match the skill | `routing-middleware`, `cron-jobs`, `vercel-functions`, `deployments-cicd` | `vercel-config.mts` |
-| **vercel.json irrelevant** | **−10** | Tool target is `vercel.json` but file keys don't match | Skills that claim `vercel.json` in `pathPatterns` | `vercel-config.mts` |
-| **Setup mode** | **+50** | 3+ bootstrap hints detected; greenfield project | `bootstrap` | `session-start-profiler.mts` |
-| **TSX review** | **+40** | 3+ `.tsx` edits (configurable via `VERCEL_PLUGIN_REVIEW_THRESHOLD`) | `react-best-practices` | `pretooluse-skill-inject.mts` |
-| **Dev server** | **+45** | Bash command matches dev server pattern (`next dev`, etc.) | `agent-browser-verify` + companions | `pretooluse-skill-inject.mts` |
-
-**Priority formula**:
-```
-effectivePriority = basePriority
- + vercelJsonAdjustment (±10 or 0)
- + profilerBoost (+5 or 0)
- + setupModeBoost (+50 or 0)
- + specialTriggerBoost (+40 or +45 or 0)
-```
-
-### vercel.json Key-to-Skill Routing
-
-**Source**: `hooks/src/vercel-config.mts`
-
-When the tool target is `vercel.json`, the hook reads the file's top-level keys and maps them to skills:
-
-| vercel.json Keys | Relevant Skill |
-|------------------|----------------|
-| `redirects`, `rewrites`, `headers`, `cleanUrls`, `trailingSlash` | `routing-middleware` |
-| `crons` | `cron-jobs` |
-| `functions`, `regions` | `vercel-functions` |
-| `builds`, `buildCommand`, `installCommand`, `outputDirectory`, `framework`, `devCommand`, `ignoreCommand` | `deployments-cicd` |
-
-Skills relevant to the file's keys get **+10**; skills that claim `vercel.json` but aren't relevant get **−10**.
-
-### Profiler Detection
-
-**Source**: `hooks/src/session-start-profiler.mts`
-
-The session-start profiler scans the project and sets `VERCEL_PLUGIN_LIKELY_SKILLS`:
-
-| Detected Signal | Skills Added |
-|-----------------|-------------|
-| `next.config.*` file | `nextjs`, `turbopack` |
-| `turbo.json` file | `turborepo` |
-| `vercel.json` file | `vercel-cli`, `deployments-cicd`, `vercel-functions` |
-| `middleware.ts\|js` file | `routing-middleware` |
-| `components.json` file | `shadcn` |
-| `next` in package.json deps | `nextjs` |
-| `@vercel/blob\|kv\|postgres` in deps | `vercel-storage` |
-| `@vercel/flags` in deps | `vercel-flags` |
-| `crons` key in vercel.json | `cron-jobs` |
-| ...and many more | See `session-start-profiler.mts` |
-
-**Bootstrap/setup mode**: When 3+ "bootstrap hints" are detected (e.g., `.env.example`, `prisma/schema.prisma`, auth dependencies, storage resource dependencies), `VERCEL_PLUGIN_SETUP_MODE=1` is set, enabling the +50 boost for the `bootstrap` skill.
-
----
-
-## Dedup State Machine
-
-The dedup system prevents re-injecting skills already delivered in the current session. It uses three independent state sources merged into a unified view:
-
-```mermaid
-stateDiagram-v2
- [*] --> Init: SessionStart sets
VERCEL_PLUGIN_SEEN_SKILLS=""
-
- state "Three State Sources" as sources {
- EV: Env Var
VERCEL_PLUGIN_SEEN_SKILLS
(comma-delimited)
- CD: Claim Directory
tmp/.../seen-skills.d/
(one file per skill, O_EXCL)
- SF: Session File
tmp/.../seen-skills.txt
(comma-delimited snapshot)
- }
-
- Init --> sources: Hook invocation
-
- sources --> Merge: mergeSeenSkillStates()
union all three sources
-
- Merge --> Check: Skill in merged set?
- Check --> Skip: Yes → already injected
- Check --> Claim: No → try atomic claim
-
- Claim --> Success: openSync(path, "wx")
File created
- Claim --> Race: File already exists
(concurrent hook won)
-
- Success --> Sync: syncSessionFileFromClaims()
- Race --> Skip
-
- state "Fallback Strategies" as fb {
- F1: file — atomic claims (primary)
- F2: env-var — no session ID
- F3: memory-only — single invocation
- F4: disabled — HOOK_DEDUP=off
- }
-```
-
-**Key design choice**: The atomic `openSync(path, "wx")` (O_EXCL) flag means if two hooks try to claim the same skill simultaneously, only one succeeds. This provides filesystem-level mutual exclusion.
-
-**Subagent isolation**: Subagents (identified by `scopeId`) get their own dedup scope. The parent's env var is excluded from the subagent's merge, so subagents get fresh skill injection.
-
-**Cleanup**: `session-end-cleanup.mts` deletes all temporary dedup files and claim directories when the session ends.
-
----
-
-## Budget Enforcement
-
-Budget enforcement prevents the plugin from flooding Claude's context window.
-
-### PreToolUse Budget
-
-| Parameter | Default | Env Override |
-|-----------|---------|-------------|
-| Byte budget | **18,000 bytes** (18 KB) | `VERCEL_PLUGIN_INJECTION_BUDGET` |
-| Max skills | **5** | — (constant `MAX_SKILLS`) |
-
-### UserPromptSubmit Budget
-
-| Parameter | Default | Env Override |
-|-----------|---------|-------------|
-| Byte budget | **8,000 bytes** (8 KB) | `VERCEL_PLUGIN_PROMPT_INJECTION_BUDGET` |
-| Max skills | **2** | — (constant `MAX_SKILLS`) |
-
-### How Budget Is Applied
-
-```mermaid
-flowchart TD
- START["Ranked skills in priority order"] --> FIRST{"First skill?"}
- FIRST -->|Yes| ALWAYS["Always inject full body
(even if over budget)"]
- FIRST -->|No| CHECK{"Body fits in
remaining budget?"}
-
- CHECK -->|Yes| FULL["Inject full body"]
- CHECK -->|No| SUM{"Summary field
exists + fits?"}
-
- SUM -->|Yes| SUMMARY["Inject summary only
"]
- SUM -->|No| DROP["Drop skill
(droppedByBudget)"]
-
- ALWAYS --> CAP{"Ceiling reached?
(5 or 2 skills)"}
- FULL --> CAP
- SUMMARY --> CAP
-
- CAP -->|Yes| DONE["Stop — cap_exceeded"]
- CAP -->|No| NEXT["Next skill"] --> CHECK
-```
-
-Rules:
-- **First skill always gets full body** regardless of budget
-- Skills are measured as UTF-8 bytes after wrapping in comment markers
-- Summary fallback injects with a `mode:summary` marker
-- Skills that neither fit as full body nor summary are dropped
-
----
-
-## Special-Case Triggers
-
-These operate alongside the normal matching pipeline with their own counter/dedup mechanisms.
-
-### TSX Review Trigger
-
-**Source**: `pretooluse-skill-inject.mts:checkTsxReviewTrigger()`
-
-Injects `react-best-practices` after repeated `.tsx` edits to catch React antipatterns early.
-
-| Parameter | Default | Env Override |
-|-----------|---------|-------------|
-| Edit threshold | 3 | `VERCEL_PLUGIN_REVIEW_THRESHOLD` |
-| Priority boost | +40 | — |
-| Counter env | — | `VERCEL_PLUGIN_TSX_EDIT_COUNT` |
-
-**Behavior**:
-1. Every Edit/Write on a `.tsx` file increments `VERCEL_PLUGIN_TSX_EDIT_COUNT`
-2. When count >= threshold, the trigger fires
-3. Counter **resets** after injection, allowing re-injection after another N edits
-4. **Bypasses** normal SEEN_SKILLS dedup — the counter is the sole gate
-
-### Dev Server Detection
-
-**Source**: `pretooluse-skill-inject.mts:checkDevServerVerify()`
-
-When Claude runs a dev server command, `agent-browser-verify` injects to encourage browser-based verification.
-
-**Detected dev server patterns**:
-```
-next dev, npm run dev, pnpm dev, bun dev, bun run dev,
-yarn dev, vite dev, vite, nuxt dev, vercel dev, astro dev
-```
-
-| Parameter | Value |
-|-----------|-------|
-| Priority boost | +45 |
-| Max iterations | 2 per session |
-| Loop guard env | `VERCEL_PLUGIN_DEV_VERIFY_COUNT` |
-| Companion skills | `verification` (co-injected as summary) |
-
-**Graceful degradation**: If `agent-browser` is not installed (`VERCEL_PLUGIN_AGENT_BROWSER_AVAILABLE=0`), the hook injects an unavailability notice suggesting the user install it.
-
-### Vercel Env Help
-
-**Source**: `pretooluse-skill-inject.mts`
-
-One-time injection of a quick-reference guide when Claude runs `vercel env add|update|pull`. Uses standard dedup with key `vercel-env-help`.
-
-### Investigation Companion Selection
-
-**Source**: `user-prompt-submit-skill-inject.mts:selectInvestigationCompanion()`
-
-When `investigation-mode` is selected via prompt signals, the second skill slot is reserved for the best companion from a prioritized list:
-
-1. `workflow` (highest priority)
-2. `agent-browser-verify`
-3. `vercel-cli`
-
-The companion must have independently scored above its `minScore`.
-
----
-
-## User Stories
-
-### User Story 1: TSX Edit Trigger
-
-> **Scenario**: Sarah is building a dashboard. She's been editing React components and is on her 3rd `.tsx` file edit.
-
-**What happens**:
-
-```mermaid
-sequenceDiagram
- participant S as Sarah
- participant CC as Claude Code
- participant Hook as PreToolUse Hook
-
- S->>CC: "Add a useEffect to fetch data"
- CC->>Hook: Edit tool on dashboard.tsx
- Note over Hook: TSX_EDIT_COUNT: 1 → 2
- Hook-->>CC: (no injection, count < 3)
-
- S->>CC: "Now add the loading state"
- CC->>Hook: Edit tool on dashboard.tsx
- Note over Hook: TSX_EDIT_COUNT: 2 → 3
- Note over Hook: Threshold reached!
- Note over Hook: Inject react-best-practices (+40 boost)
- Hook-->>CC: additionalContext with React best practices
-
- Note over CC: Claude now has guidance on:
- Hook dependencies
- Memoization
- Error boundaries
- "use client" directive
-
- S->>CC: "Extract this into a custom hook"
- CC->>Hook: Edit tool on useData.tsx
- Note over Hook: TSX_EDIT_COUNT: 0 → 1 (reset after injection)
- Hook-->>CC: (no injection, fresh counter)
-```
-
-**Why it matters**: After several TSX edits, Claude accumulates context about what the developer is building. The react-best-practices skill arrives at the right moment — when Claude has enough context to apply the guidance meaningfully, and before the code grows too large to refactor easily.
-
-**Key details**:
-- The counter increments on **any** `.tsx` file edit (Write or Edit tool)
-- After injection, the counter resets to 0, not to 1
-- The trigger bypasses normal dedup — it can fire multiple times per session
-- The +40 boost ensures `react-best-practices` outranks almost any other skill
-
-### User Story 2: Dev Server Detection
-
-> **Scenario**: Marcus just finished implementing a feature and asks Claude to start the dev server so he can test it.
-
-**What happens**:
-
-```mermaid
-sequenceDiagram
- participant M as Marcus
- participant CC as Claude Code
- participant Hook as PreToolUse Hook
- participant Prof as Session Profiler
-
- Note over Prof: Session start: detected agent-browser CLI
Set AGENT_BROWSER_AVAILABLE=1
-
- M->>CC: "Start the dev server"
- CC->>Hook: Bash tool: "npm run dev"
- Note over Hook: Dev server pattern matched!
agent-browser available ✓
Iteration count: 0 < 2
-
- Note over Hook: Inject agent-browser-verify (+45)
+ verification companion (summary)
- Hook-->>CC: additionalContext with browser verification guide
-
- Note over CC: Claude now knows to:
1. Wait for server to be ready
2. Open browser to verify
3. Check for console errors
4. Take a screenshot
-
- M->>CC: "Looks broken, restart the dev server"
- CC->>Hook: Bash tool: "npm run dev"
- Note over Hook: Iteration count: 1 < 2
- Hook-->>CC: Browser verification injected again
-
- M->>CC: "One more restart please"
- CC->>Hook: Bash tool: "npm run dev"
- Note over Hook: Iteration count: 2 >= 2
Loop guard hit!
- Hook-->>CC: (no injection — prevents infinite loops)
-```
-
-**Why it matters**: When a developer starts a dev server, they expect to see their changes in a browser. The plugin nudges Claude to verify using browser automation rather than just assuming the server started correctly.
-
-**Key details**:
-- The profiler checks at session start whether `agent-browser` CLI is on PATH
-- If not installed, the hook injects an **unavailability notice** instead (suggesting installation)
-- The loop guard (max 2 iterations) prevents the skill from being injected on every dev server restart
-- The `verification` skill is co-injected as a summary-only companion
-
-### User Story 3: Prompt Signal Matching
-
-> **Scenario**: Jess is building a Next.js app and types "my deploy keeps failing with a timeout error" into Claude Code.
-
-**What happens**:
-
-```mermaid
-sequenceDiagram
- participant J as Jess
- participant CC as Claude Code
- participant Hook as UserPromptSubmit Hook
-
- J->>CC: "my deploy keeps failing with a timeout error"
-
- Note over Hook: Step 1: Normalize prompt
- Note over Hook: "my deploy keep fail with a timeout error"
(lowercase + stem "keeps"→"keep", "failing"→"fail")
-
- Note over Hook: Step 2: Score all skills with promptSignals
-
- rect rgb(255, 243, 224)
- Note over Hook: investigation-mode scoring:
phrase "timeout" → not exact (no "timeout error" phrase)
allOf ["timeout", "api"] → "timeout" ✓, "api" ✗ → +0
anyOf ["timeout", "stuck", "debug"] → "timeout" +1 → +1
Score: 1 < minScore 4 → NOT MATCHED
- end
-
- rect rgb(232, 245, 233)
- Note over Hook: deployments-cicd scoring:
phrase "deploy" → +6
allOf ["deploy", "fail"] → both present → +4
anyOf ["timeout", "error"] → +1, +1 → +2 (capped)
Score: 12 >= minScore 6 → MATCHED ✓
- end
-
- rect rgb(227, 242, 253)
- Note over Hook: vercel-functions scoring:
phrase "timeout" → +6 (if configured)
Score: 6 >= minScore 6 → MATCHED ✓
- end
-
- Note over Hook: Step 3: Rank by score DESC
- Note over Hook: deployments-cicd (12) > vercel-functions (6)
- Note over Hook: Step 4: Budget check (8KB, max 2 skills)
-
- Hook-->>CC: additionalContext with deployment + functions guidance
-
- Note over CC: Claude now has guidance on:
- Deployment debugging steps
- Function timeout configuration
- Vercel build logs analysis
-```
-
-**Why it matters**: The prompt signal system catches user intent even when they don't mention specific technologies. The scoring formula ensures the most relevant skill wins — `deployments-cicd` scores higher because it matches on both the phrase "deploy" and the allOf group ["deploy", "fail"].
-
-**Key details**:
-- Stemming converts "failing" → "fail" and "keeps" → "keep", making signals match naturally
-- The `noneOf` mechanism ensures skills aren't injected for irrelevant contexts (e.g., investigation-mode has `noneOf: ["css stuck", "sticky position"]`)
-- The 8KB budget and 2-skill cap keep prompt injection lean since it's speculative
-- If both investigation-mode and a companion were matched, investigation companion selection would kick in
-
----
-
-## PostToolUse Validation
-
-**Source**: `hooks/src/posttooluse-validate.mts`
-
-After Claude writes or edits a file, the PostToolUse hook runs validation rules from matched skills.
-
-```mermaid
-flowchart TD
- WRITE["Claude writes/edits file"] --> MATCH["Match file path → skills
(using pathPatterns)"]
- MATCH --> LOOP["For each matched skill"]
- LOOP --> RULE["For each validate rule"]
- RULE --> SKIP{"skipIfFileContains
matches?"}
- SKIP -->|Yes| NEXT["Skip rule"]
- SKIP -->|No| TEST{"pattern matches
any line?"}
- TEST -->|Yes| REPORT["Report violation
(severity + message)"]
- TEST -->|No| NEXT
- NEXT --> RULE
- REPORT --> LOOP
-```
-
-**Validation rule fields**:
-
-| Field | Type | Required | Description |
-|-------|------|----------|-------------|
-| `pattern` | `string` (regex) | Yes | Pattern to search for in file content |
-| `message` | `string` | Yes | Actionable fix instruction for Claude |
-| `severity` | `"error"` or `"warn"` | Yes | `error` = must fix; `warn` = advisory |
-| `skipIfFileContains` | `string` (regex) | No | Skip rule if file matches this pattern |
-
-**Example** — Next.js async cookies rule:
-
-```yaml
-validate:
- - pattern: (?
## Greenfield execution mode
@@ -29,7 +30,7 @@ function detectInjectClaudeMdPlatform(input, _env = process.env) {
}
return "claude-code";
}
-function buildInjectClaudeMdParts(content, env = process.env, knowledgeUpdate = null) {
+function buildInjectClaudeMdParts(content, env = process.env, knowledgeUpdate = null, greenfield = env.VERCEL_PLUGIN_GREENFIELD === "true") {
const parts = [];
if (content !== null) {
parts.push(content);
@@ -37,7 +38,7 @@ function buildInjectClaudeMdParts(content, env = process.env, knowledgeUpdate =
if (knowledgeUpdate !== null) {
parts.push(knowledgeUpdate);
}
- if (env.VERCEL_PLUGIN_GREENFIELD === "true") {
+ if (greenfield) {
parts.push(GREENFIELD_CONTEXT);
}
return parts;
@@ -48,6 +49,9 @@ function formatInjectClaudeMdOutput(platform, content) {
}
return content;
}
+function resolveInjectClaudeMdProjectRoot(env = process.env) {
+ return env.CLAUDE_PROJECT_ROOT ?? env.CURSOR_PROJECT_DIR ?? process.cwd();
+}
function stripFrontmatter(content) {
const match = content.match(/^---\n[\s\S]*?\n---\n?([\s\S]*)$/);
return match ? match[1].trim() : content.trim();
@@ -55,10 +59,25 @@ function stripFrontmatter(content) {
function main() {
const input = parseInjectClaudeMdInput(readFileSync(0, "utf8"));
const platform = detectInjectClaudeMdPlatform(input);
+ const projectRoot = resolveInjectClaudeMdProjectRoot();
+ const isGreenfield = isGreenfieldDirectory(projectRoot);
+ const greenfieldOverride = process.env.VERCEL_PLUGIN_GREENFIELD === "true";
+ const shouldActivate = isGreenfield || greenfieldOverride || !existsSync(projectRoot) || hasSessionStartActivationMarkers(projectRoot);
+ if (!shouldActivate) {
+ if (platform === "cursor") {
+ process.stdout.write(JSON.stringify(formatOutput(platform, {})));
+ }
+ return;
+ }
const thinSessionContext = safeReadFile(join(pluginRoot(), "vercel-session.md"));
const knowledgeUpdateRaw = safeReadFile(join(pluginRoot(), "skills", "knowledge-update", "SKILL.md"));
const knowledgeUpdate = knowledgeUpdateRaw !== null ? stripFrontmatter(knowledgeUpdateRaw) : null;
- const parts = buildInjectClaudeMdParts(thinSessionContext, process.env, knowledgeUpdate);
+ const parts = buildInjectClaudeMdParts(
+ thinSessionContext,
+ process.env,
+ knowledgeUpdate,
+ isGreenfield || greenfieldOverride
+ );
if (parts.length === 0) {
return;
}
diff --git a/hooks/platform-hook-compat.test.ts b/hooks/platform-hook-compat.test.ts
index 3b23c18..d43095a 100644
--- a/hooks/platform-hook-compat.test.ts
+++ b/hooks/platform-hook-compat.test.ts
@@ -1,32 +1,8 @@
-import { afterEach, describe, expect, it } from "bun:test";
-import { spawnSync } from "node:child_process";
-import { mkdirSync, mkdtempSync, readFileSync, rmSync, writeFileSync } from "node:fs";
-import { tmpdir } from "node:os";
-import { join } from "node:path";
+import { describe, expect, it } from "bun:test";
import {
formatOutput as formatPreToolOutput,
parseInput as parsePreToolInput,
} from "./src/pretooluse-skill-inject.mts";
-import {
- formatOutput as formatPostToolOutput,
- parseInput as parsePostToolInput,
-} from "./src/posttooluse-validate.mts";
-import type { ValidationViolation } from "./src/posttooluse-validate.mts";
-
-const tempDirs = new Set();
-
-function createTempDir(prefix: string): string {
- const dir = mkdtempSync(join(tmpdir(), prefix));
- tempDirs.add(dir);
- return dir;
-}
-
-afterEach(() => {
- for (const dir of tempDirs) {
- rmSync(dir, { recursive: true, force: true });
- }
- tempDirs.clear();
-});
describe("platform hook compatibility", () => {
it("test_parseInput_normalizes_cursor_session_and_workspace_root_for_pretooluse", () => {
@@ -96,90 +72,4 @@ describe("platform hook compatibility", () => {
expect(parsed.hookSpecificOutput).toBeUndefined();
});
- it("test_parseInput_normalizes_cursor_project_dir_for_posttooluse", () => {
- const parsed = parsePostToolInput(
- JSON.stringify({
- tool_name: "Edit",
- tool_input: { file_path: "app/page.tsx" },
- conversation_id: "cursor-conversation",
- }),
- undefined,
- {
- ...process.env,
- CURSOR_PROJECT_DIR: "/tmp/cursor-project",
- CLAUDE_PROJECT_ROOT: "/tmp/claude-project",
- },
- );
-
- expect(parsed).not.toBeNull();
- expect(parsed?.platform).toBe("cursor");
- expect(parsed?.sessionId).toBe("cursor-conversation");
- expect(parsed?.cwd).toBe("/tmp/cursor-project");
- });
-
- it("test_formatOutput_returns_cursor_flat_payload_for_posttooluse", () => {
- const violations: ValidationViolation[] = [
- {
- skill: "ai-sdk",
- line: 8,
- message: "Use streamText for streaming responses.",
- severity: "recommended",
- matchedText: "generateText",
- },
- ];
-
- const output = formatPostToolOutput(
- violations,
- ["ai-sdk"],
- "app/page.tsx",
- undefined,
- "cursor",
- );
-
- const parsed = JSON.parse(output);
- expect(parsed.additional_context).toContain("VALIDATION");
- expect(parsed.additional_context).toContain("app/page.tsx");
- expect(parsed.hookSpecificOutput).toBeUndefined();
- });
-
- it("test_posttooluse_shadcn_font_fix_uses_cursor_workspace_root_and_returns_flat_output", () => {
- const projectRoot = createTempDir("vercel-plugin-shadcn-");
- mkdirSync(join(projectRoot, "app"), { recursive: true });
- writeFileSync(
- join(projectRoot, "app/globals.css"),
- [
- "@theme inline {",
- " --font-sans: var(--font-sans);",
- " --font-mono: var(--font-geist-mono);",
- "}",
- ].join("\n"),
- "utf-8",
- );
-
- const result = spawnSync(process.execPath, ["hooks/posttooluse-shadcn-font-fix.mjs"], {
- cwd: process.cwd(),
- encoding: "utf-8",
- env: {
- ...process.env,
- CURSOR_PROJECT_DIR: "/tmp/incorrect-cursor-project",
- },
- input: JSON.stringify({
- tool_name: "Bash",
- tool_input: { command: "npx shadcn@latest init" },
- conversation_id: "cursor-conversation",
- workspace_roots: [projectRoot],
- }),
- });
-
- expect(result.status).toBe(0);
- expect(result.stderr).toBe("");
-
- const parsed = JSON.parse(result.stdout.trim());
- expect(parsed.additional_context).toContain("Auto-fix applied");
- expect(parsed.additionalContext).toBeUndefined();
-
- const content = readFileSync(join(projectRoot, "app/globals.css"), "utf-8");
- expect(content).toContain('"Geist", "Geist Fallback"');
- expect(content).toContain('"Geist Mono", "Geist Mono Fallback"');
- });
});
diff --git a/hooks/posttooluse-bash-chain.mjs b/hooks/posttooluse-bash-chain.mjs
deleted file mode 100755
index 06aa49a..0000000
--- a/hooks/posttooluse-bash-chain.mjs
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/env node
-
-// hooks/src/posttooluse-bash-chain.mts
-import { readFileSync, realpathSync } from "fs";
-import { join, resolve } from "path";
-import { fileURLToPath } from "url";
-import { detectPlatform } from "./compat.mjs";
-import {
- pluginRoot as resolvePluginRoot,
- readSessionFile,
- safeReadFile,
- tryClaimSessionKey,
- syncSessionFileFromClaims
-} from "./hook-env.mjs";
-import { extractFrontmatter } from "./skill-map-frontmatter.mjs";
-import { createLogger } from "./logger.mjs";
-var PLUGIN_ROOT = resolvePluginRoot();
-var CHAIN_BUDGET_BYTES = 18e3;
-var DEFAULT_CHAIN_CAP = 2;
-var PACKAGE_SKILL_MAP = {
- // Express / traditional Node servers → Vercel Functions
- express: {
- skill: "vercel-functions",
- message: "Express.js detected \u2014 Vercel uses Web API route handlers, not Express. Loading Vercel Functions guidance."
- },
- fastify: {
- skill: "vercel-functions",
- message: "Fastify detected \u2014 consider Vercel Functions with Web Request/Response API for serverless deployment."
- },
- koa: {
- skill: "vercel-functions",
- message: "Koa detected \u2014 consider Vercel Functions with Web Request/Response API for serverless deployment."
- },
- // Database / ORM libraries → Vercel Storage
- mongoose: {
- skill: "vercel-storage",
- message: "Mongoose detected \u2014 loading Vercel Storage guidance for database options on the platform."
- },
- prisma: {
- skill: "vercel-storage",
- message: "Prisma detected \u2014 loading Vercel Storage guidance for Neon Postgres (recommended) and other Marketplace databases."
- },
- "@libsql/client": {
- skill: "vercel-storage",
- message: "@libsql/client detected \u2014 loading Vercel Storage guidance for Marketplace database alternatives."
- },
- "@vercel/postgres": {
- skill: "vercel-storage",
- message: "@vercel/postgres is sunset \u2014 use @neondatabase/serverless instead. Loading Storage migration guidance."
- },
- "@vercel/kv": {
- skill: "vercel-storage",
- message: "@vercel/kv is sunset \u2014 use @upstash/redis instead. Loading Storage migration guidance."
- },
- // Payments → Stripe integration
- stripe: {
- skill: "payments",
- message: "Stripe detected \u2014 loading Vercel Marketplace Stripe integration guidance for checkout, webhooks, and subscriptions."
- },
- // Direct AI provider SDKs → AI Gateway
- openai: {
- skill: "ai-gateway",
- message: "Direct OpenAI SDK detected \u2014 AI Gateway provides OIDC auth, failover, and cost tracking with no manual API keys. Loading AI Gateway guidance."
- },
- "@anthropic-ai/sdk": {
- skill: "ai-gateway",
- message: "Direct Anthropic SDK detected \u2014 AI Gateway provides unified access to all providers. Loading AI Gateway guidance."
- },
- "@google/generative-ai": {
- skill: "ai-gateway",
- message: "Direct Google AI SDK detected \u2014 AI Gateway provides unified access to all providers. Loading AI Gateway guidance."
- },
- langchain: {
- skill: "ai-sdk",
- message: "LangChain detected \u2014 AI SDK v6 provides native tool calling, agents, and streaming without the LangChain abstraction layer. Loading AI SDK guidance."
- },
- "@langchain/core": {
- skill: "ai-sdk",
- message: "LangChain Core detected \u2014 AI SDK v6 provides native tool calling, agents, and streaming without the LangChain abstraction layer. Loading AI SDK guidance."
- },
- // Auth
- "next-auth": {
- skill: "auth",
- message: "next-auth detected \u2014 consider Clerk via Vercel Marketplace for managed auth with auto-provisioned env vars. Loading auth guidance."
- },
- "@clerk/nextjs": {
- skill: "auth",
- message: "@clerk/nextjs detected \u2014 loading Vercel Marketplace Clerk integration guidance for middleware auth and sign-in flows."
- },
- // CMS
- "@sanity/client": {
- skill: "cms",
- message: "@sanity/client detected \u2014 loading Vercel Marketplace Sanity integration guidance for studio, preview mode, and revalidation."
- },
- contentful: {
- skill: "cms",
- message: "Contentful detected \u2014 loading CMS integration guidance for content modeling, preview mode, and revalidation webhooks."
- },
- // Chat platforms → Chat SDK
- "@slack/bolt": {
- skill: "chat-sdk",
- message: "@slack/bolt detected \u2014 Chat SDK provides a unified multi-platform API (Slack, Teams, Discord, Telegram) with a single codebase. Loading Chat SDK guidance."
- },
- "@slack/web-api": {
- skill: "chat-sdk",
- message: "@slack/web-api detected \u2014 Chat SDK provides a unified multi-platform API with cards, streaming, and state management. Loading Chat SDK guidance."
- },
- "discord.js": {
- skill: "chat-sdk",
- message: "discord.js detected \u2014 Chat SDK provides a unified multi-platform API (Slack, Teams, Discord, Telegram) with a single codebase. Loading Chat SDK guidance."
- },
- telegraf: {
- skill: "chat-sdk",
- message: "Telegraf detected \u2014 Chat SDK provides a unified multi-platform API (Slack, Teams, Discord, Telegram) with a single codebase. Loading Chat SDK guidance."
- },
- grammy: {
- skill: "chat-sdk",
- message: "Grammy detected \u2014 Chat SDK provides a unified multi-platform API (Slack, Teams, Discord, Telegram) with a single codebase. Loading Chat SDK guidance."
- },
- // Email
- resend: {
- skill: "email",
- message: "Resend detected \u2014 loading Vercel Marketplace Resend integration guidance for transactional emails and React Email templates."
- },
- // Workflow-related
- workflow: {
- skill: "workflow",
- message: "Workflow DevKit installed \u2014 loading WDK guidance for durable workflows."
- },
- // AI SDK
- ai: {
- skill: "ai-sdk",
- message: "AI SDK installed \u2014 loading AI SDK v6 guidance."
- },
- "@ai-sdk/react": {
- skill: "ai-sdk",
- message: "@ai-sdk/react installed \u2014 loading AI SDK v6 guidance for React hooks."
- },
- // Security / middleware
- cors: {
- skill: "routing-middleware",
- message: "cors detected \u2014 Vercel Routing Middleware handles CORS at the platform level with rewrites and headers. Loading Routing Middleware guidance."
- },
- // Env management
- dotenv: {
- skill: "env-vars",
- message: "dotenv detected \u2014 Vercel manages environment variables natively via `vercel env`. Loading env-vars guidance."
- }
-};
-var log = createLogger();
-var INSTALL_CMD_RE = /(?:npm\s+(?:install|i|add)|yarn\s+add|pnpm\s+(?:add|install)|bun\s+(?:add|install))\s+(.+)/;
-function parseInstallCommand(command) {
- if (!command || typeof command !== "string") return [];
- const match = INSTALL_CMD_RE.exec(command);
- if (!match) return [];
- const pkgString = match[1];
- const packages = [];
- for (const token of pkgString.split(/\s+/)) {
- if (!token) continue;
- if (token.startsWith("-")) continue;
- if (token.startsWith(".") || token.startsWith("/")) continue;
- let pkgName = token;
- if (pkgName.startsWith("@")) {
- const slashIndex = pkgName.indexOf("/");
- if (slashIndex > 0) {
- const afterSlash = pkgName.slice(slashIndex + 1);
- const versionAt = afterSlash.indexOf("@");
- if (versionAt > 0) {
- pkgName = pkgName.slice(0, slashIndex + 1 + versionAt);
- }
- }
- } else {
- const atIndex = pkgName.indexOf("@");
- if (atIndex > 0) {
- pkgName = pkgName.slice(0, atIndex);
- }
- }
- if (pkgName) packages.push(pkgName);
- }
- return packages;
-}
-function resolveSessionId(input) {
- const sessionId = input.session_id ?? input.conversation_id;
- return typeof sessionId === "string" && sessionId.trim() !== "" ? sessionId : null;
-}
-function parseBashInput(raw, logger) {
- const l = logger || log;
- const trimmed = (raw || "").trim();
- if (!trimmed) return null;
- let input;
- try {
- input = JSON.parse(trimmed);
- } catch {
- return null;
- }
- const toolName = input.tool_name || "";
- if (toolName !== "Bash") {
- l.debug("posttooluse-bash-chain-skip", { reason: "not_bash_tool", toolName });
- return null;
- }
- const toolInput = input.tool_input || {};
- const command = toolInput.command || "";
- if (!command) {
- l.debug("posttooluse-bash-chain-skip", { reason: "no_command" });
- return null;
- }
- const sessionId = resolveSessionId(input);
- const platform = detectPlatform(input);
- return { command, sessionId, platform };
-}
-function runBashChainInjection(packages, sessionId, pluginRoot, logger, env = process.env) {
- const l = logger || log;
- const result = { injected: [], totalBytes: 0 };
- if (packages.length === 0) return result;
- const chainCap = Math.max(
- 1,
- parseInt(env.VERCEL_PLUGIN_CHAIN_CAP || "", 10) || DEFAULT_CHAIN_CAP
- );
- const fileSeen = sessionId ? readSessionFile(sessionId, "seen-skills") : "";
- const seenSet = new Set(fileSeen.split(",").filter(Boolean));
- const targetsSeen = /* @__PURE__ */ new Set();
- for (const pkg of packages) {
- const mapping = PACKAGE_SKILL_MAP[pkg];
- if (!mapping) continue;
- const { skill, message } = mapping;
- if (targetsSeen.has(skill)) continue;
- targetsSeen.add(skill);
- if (result.injected.length >= chainCap) {
- l.debug("posttooluse-bash-chain-cap-reached", {
- cap: chainCap,
- remaining: packages.length - result.injected.length
- });
- break;
- }
- if (seenSet.has(skill)) {
- l.debug("posttooluse-bash-chain-skip-dedup", { pkg, skill });
- continue;
- }
- const skillPath = join(pluginRoot, "skills", skill, "SKILL.md");
- const skillContent = safeReadFile(skillPath);
- if (!skillContent) {
- l.debug("posttooluse-bash-chain-skip-missing", { pkg, skill, path: skillPath });
- continue;
- }
- const { body } = extractFrontmatter(skillContent);
- const trimmedBody = body.trim();
- if (!trimmedBody) continue;
- const bytes = Buffer.byteLength(trimmedBody, "utf-8");
- if (result.totalBytes + bytes > CHAIN_BUDGET_BYTES) {
- l.debug("posttooluse-bash-chain-budget-exceeded", {
- pkg,
- skill,
- bytes,
- totalBytes: result.totalBytes,
- budget: CHAIN_BUDGET_BYTES
- });
- break;
- }
- if (sessionId) {
- const claimed = tryClaimSessionKey(sessionId, "seen-skills", skill);
- if (!claimed) {
- l.debug("posttooluse-bash-chain-skip-concurrent-claim", { pkg, skill });
- seenSet.add(skill);
- continue;
- }
- syncSessionFileFromClaims(sessionId, "seen-skills");
- }
- seenSet.add(skill);
- result.injected.push({ packageName: pkg, skill, message, content: trimmedBody });
- result.totalBytes += bytes;
- l.debug("posttooluse-bash-chain-injected", { pkg, skill, bytes, totalBytes: result.totalBytes });
- }
- if (result.injected.length > 0) {
- l.summary("posttooluse-bash-chain-result", {
- injectedCount: result.injected.length,
- totalBytes: result.totalBytes,
- targets: result.injected.map((i) => i.skill)
- });
- }
- return result;
-}
-function formatPlatformOutput(platform, additionalContext) {
- if (platform === "cursor") {
- return JSON.stringify({ additional_context: additionalContext });
- }
- const output = {
- hookSpecificOutput: {
- hookEventName: "PostToolUse",
- additionalContext
- }
- };
- return JSON.stringify(output);
-}
-function formatBashChainOutput(chainResult, platform = "claude-code") {
- if (chainResult.injected.length === 0) return "{}";
- const parts = [];
- for (const chain of chainResult.injected) {
- parts.push(
- ``,
- `**Skill context auto-loaded** (${chain.skill}): ${chain.message}`,
- "",
- chain.content,
- ``
- );
- }
- const metadata = {
- version: 1,
- hook: "posttooluse-bash-chain",
- packages: chainResult.injected.map((i) => i.packageName),
- chainedSkills: chainResult.injected.map((i) => i.skill)
- };
- parts.push(``);
- return formatPlatformOutput(platform, parts.join("\n"));
-}
-function run() {
- const tStart = log.active ? log.now() : 0;
- let raw;
- try {
- raw = readFileSync(0, "utf-8");
- } catch {
- return "{}";
- }
- const parsed = parseBashInput(raw, log);
- if (!parsed) return "{}";
- const { command, sessionId, platform } = parsed;
- const packages = parseInstallCommand(command);
- if (packages.length === 0) {
- log.debug("posttooluse-bash-chain-skip", { reason: "no_packages_detected", command });
- return "{}";
- }
- log.debug("posttooluse-bash-chain-packages", { packages, command });
- const chainResult = runBashChainInjection(packages, sessionId, PLUGIN_ROOT, log);
- const output = formatBashChainOutput(chainResult, platform);
- log.complete("posttooluse-bash-chain-done", {
- matchedCount: packages.length,
- injectedCount: chainResult.injected.length,
- dedupedCount: 0,
- cappedCount: 0
- }, log.active ? { total: Math.round(log.now() - tStart) } : {});
- return output;
-}
-function isMainModule() {
- try {
- const scriptPath = realpathSync(resolve(process.argv[1] || ""));
- const modulePath = realpathSync(fileURLToPath(import.meta.url));
- return scriptPath === modulePath;
- } catch {
- return false;
- }
-}
-if (isMainModule()) {
- try {
- const output = run();
- process.stdout.write(output);
- } catch (err) {
- const entry = [
- `[${(/* @__PURE__ */ new Date()).toISOString()}] CRASH in posttooluse-bash-chain.mts`,
- ` error: ${err?.message || String(err)}`,
- ` stack: ${err?.stack || "(no stack)"}`,
- ` PLUGIN_ROOT: ${PLUGIN_ROOT}`,
- ""
- ].join("\n");
- process.stderr.write(entry);
- process.stdout.write("{}");
- }
-}
-export {
- PACKAGE_SKILL_MAP,
- formatBashChainOutput,
- parseBashInput,
- parseInstallCommand,
- run,
- runBashChainInjection
-};
diff --git a/hooks/posttooluse-shadcn-font-fix.mjs b/hooks/posttooluse-shadcn-font-fix.mjs
deleted file mode 100644
index 8491e0f..0000000
--- a/hooks/posttooluse-shadcn-font-fix.mjs
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/usr/bin/env node
-
-/**
- * PostToolUse hook: Fix shadcn init breaking Geist fonts
- *
- * After `npx shadcn init` runs, globals.css gets rewritten with
- * `--font-sans: var(--font-sans)` — a circular self-reference that resolves
- * to nothing, causing fonts to fall back to Times/serif.
- *
- * Tailwind v4's `@theme inline` resolves CSS custom properties at parse time,
- * NOT at runtime. So `var(--font-geist-sans)` also doesn't work because the
- * Next.js font variable is injected via className at runtime.
- *
- * The fix: use literal font family names in @theme inline.
- *
- * This hook also reminds to move font variable classNames from to .
- */
-
-import { readFileSync, writeFileSync, existsSync } from "node:fs";
-import { join } from "node:path";
-
-function detectPlatform(input) {
- if ("conversation_id" in input || "cursor_version" in input) {
- return "cursor";
- }
- return "claude-code";
-}
-
-function resolveWorkspaceRoot(input) {
- if (!Array.isArray(input.workspace_roots)) {
- return null;
- }
-
- for (const root of input.workspace_roots) {
- if (typeof root === "string" && root.trim() !== "") {
- return root;
- }
- }
-
- return null;
-}
-
-function resolveCwd(input) {
- const candidate = input.cwd
- ?? input.working_directory
- ?? resolveWorkspaceRoot(input)
- ?? process.env.CURSOR_PROJECT_DIR
- ?? process.env.CLAUDE_WORKING_DIRECTORY
- ?? process.cwd();
-
- return typeof candidate === "string" && candidate.trim() !== "" ? candidate : process.cwd();
-}
-
-// Read hook input from stdin
-let input = "";
-for await (const chunk of process.stdin) {
- input += chunk;
-}
-
-let parsed;
-try {
- parsed = JSON.parse(input);
-} catch {
- process.exit(0);
-}
-
-const platform = detectPlatform(parsed);
-
-const toolName = parsed.tool_name;
-const toolInput = parsed.tool_input || {};
-
-// Only trigger after Bash commands that look like shadcn init/add
-if (toolName !== "Bash") process.exit(0);
-
-const command = toolInput.command || "";
-if (!command.match(/\bnpx\s+shadcn(@latest)?\s+(init|add)\b/)) process.exit(0);
-
-// Find globals.css — check common locations
-const cwd = resolveCwd(parsed);
-const candidates = [
- join(cwd, "app/globals.css"),
- join(cwd, "src/app/globals.css"),
-];
-
-let globalsPath = null;
-for (const candidate of candidates) {
- if (existsSync(candidate)) {
- globalsPath = candidate;
- break;
- }
-}
-
-if (!globalsPath) process.exit(0);
-
-const content = readFileSync(globalsPath, "utf-8");
-
-// Check for the broken patterns:
-// 1. Circular: --font-sans: var(--font-sans)
-// 2. Runtime var that @theme can't resolve: --font-sans: var(--font-geist-sans)
-const hasBrokenFont =
- content.includes("--font-sans: var(--font-sans)") ||
- content.includes("--font-sans: var(--font-geist-sans)");
-
-if (!hasBrokenFont) process.exit(0);
-
-// Fix with literal font names that @theme inline can resolve at parse time
-let fixed = content.replace(
- /--font-sans:\s*var\(--font-(?:sans|geist-sans)\)/g,
- '--font-sans: "Geist", "Geist Fallback", ui-sans-serif, system-ui, sans-serif'
-);
-
-// Also fix mono if present
-fixed = fixed.replace(
- /--font-mono:\s*var\(--font-(?:mono|geist-mono)\)/g,
- '--font-mono: "Geist Mono", "Geist Mono Fallback", ui-monospace, monospace'
-);
-
-writeFileSync(globalsPath, fixed, "utf-8");
-
-const relPath = globalsPath.replace(cwd + "/", "");
-
-// Output a detailed explanation to the conversation
-const message = `
-**Auto-fix applied to \`${relPath}\`**: Replaced CSS variable font references with literal Geist font family names.
-
-### Why this fix was needed
-
-\`shadcn init\` rewrites \`globals.css\` and sets \`--font-sans: var(--font-sans)\` inside \`@theme inline\` — a circular self-reference that resolves to nothing, causing all text to fall back to Times/serif.
-
-**Why \`var(--font-geist-sans)\` also doesn't work**: Tailwind v4's \`@theme inline\` block resolves CSS custom properties at **CSS parse time** (build time), not at runtime. Next.js \`next/font\` injects \`--font-geist-sans\` via a className on \`\` at **runtime**, so it doesn't exist when \`@theme inline\` evaluates. The variable resolves to empty, and fonts fall back to the browser default serif.
-
-**The correct fix** (applied automatically):
-\`\`\`css
-@theme inline {
- --font-sans: "Geist", "Geist Fallback", ui-sans-serif, system-ui, sans-serif;
- --font-mono: "Geist Mono", "Geist Mono Fallback", ui-monospace, monospace;
-}
-\`\`\`
-
-### Second required fix (manual)
-
-You MUST also move the font variable classNames from \`\` to \`\` in \`layout.tsx\`. The default \`create-next-app\` scaffold puts them on \`\`, but they need to be on \`\` so the \`@font-face\` declarations are scoped to the document root:
-
-\`\`\`tsx
-// CORRECT — font variables on
-
-
-
-// WRONG — default scaffold puts them on
-
-
-\`\`\`
-`;
-
-const result = platform === "cursor"
- ? { additional_context: message }
- : { additionalContext: message };
-
-console.log(JSON.stringify(result));
diff --git a/hooks/posttooluse-telemetry.mjs b/hooks/posttooluse-telemetry.mjs
deleted file mode 100755
index 75c7a13..0000000
--- a/hooks/posttooluse-telemetry.mjs
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env node
-
-// hooks/src/posttooluse-telemetry.mts
-import { readFileSync } from "fs";
-function parseStdin() {
- try {
- const raw = readFileSync(0, "utf-8").trim();
- if (!raw) return null;
- return JSON.parse(raw);
- } catch {
- return null;
- }
-}
-async function main() {
- parseStdin();
- process.stdout.write("{}");
- process.exit(0);
-}
-main();
diff --git a/hooks/posttooluse-validate.mjs b/hooks/posttooluse-validate.mjs
deleted file mode 100755
index 9a51bf2..0000000
--- a/hooks/posttooluse-validate.mjs
+++ /dev/null
@@ -1,598 +0,0 @@
-#!/usr/bin/env node
-
-// hooks/src/posttooluse-validate.mts
-import { createHash } from "crypto";
-import { readFileSync, realpathSync } from "fs";
-import { join, resolve } from "path";
-import { fileURLToPath } from "url";
-import { detectPlatform } from "./compat.mjs";
-import {
- pluginRoot as resolvePluginRoot,
- readSessionFile,
- safeReadFile,
- writeSessionFile,
- tryClaimSessionKey,
- syncSessionFileFromClaims
-} from "./hook-env.mjs";
-import { buildSkillMap, extractFrontmatter } from "./skill-map-frontmatter.mjs";
-import {
- compileSkillPatterns,
- matchPathWithReason,
- matchImportWithReason
-} from "./patterns.mjs";
-import { createLogger } from "./logger.mjs";
-var PLUGIN_ROOT = resolvePluginRoot();
-var SUPPORTED_TOOLS = ["Write", "Edit"];
-var VALIDATED_FILES_ENV_KEY = "VERCEL_PLUGIN_VALIDATED_FILES";
-var CHAIN_BUDGET_BYTES = 18e3;
-var DEFAULT_CHAIN_CAP = 2;
-function resolveToolFilePaths(toolInput) {
- const collected = [];
- const pushPath = (value) => {
- if (typeof value !== "string") return;
- const trimmed = value.trim();
- if (trimmed !== "") {
- collected.push(trimmed);
- }
- };
- pushPath(toolInput.file_path);
- if (Array.isArray(toolInput.file_paths)) {
- for (const value of toolInput.file_paths) {
- pushPath(value);
- }
- }
- if (Array.isArray(toolInput.files)) {
- for (const value of toolInput.files) {
- if (typeof value === "string") {
- pushPath(value);
- continue;
- }
- if (value && typeof value === "object" && "file_path" in value) {
- pushPath(value.file_path);
- }
- }
- }
- return [...new Set(collected)];
-}
-function resolveSessionId(input) {
- const sessionId = input.session_id ?? input.conversation_id;
- return typeof sessionId === "string" && sessionId.trim() !== "" ? sessionId : null;
-}
-function resolveHookCwd(input, env) {
- const workspaceRoot = Array.isArray(input.workspace_roots) ? input.workspace_roots[0] : void 0;
- const candidate = input.cwd ?? workspaceRoot ?? env.CURSOR_PROJECT_DIR ?? env.CLAUDE_PROJECT_ROOT ?? process.cwd();
- return typeof candidate === "string" && candidate.trim() !== "" ? candidate : process.cwd();
-}
-function formatPlatformOutput(platform, additionalContext, env) {
- if (!additionalContext) {
- return "{}";
- }
- if (platform === "cursor") {
- const output2 = {
- additional_context: additionalContext
- };
- if (env && Object.keys(env).length > 0) {
- output2.env = env;
- }
- return JSON.stringify(output2);
- }
- const output = {
- hookSpecificOutput: {
- hookEventName: "PostToolUse",
- additionalContext
- }
- };
- return JSON.stringify(output);
-}
-function validationRuleId(skill, rule) {
- return `${skill}::${rule.pattern}`;
-}
-var log = createLogger();
-function parseInput(raw, logger, env = process.env) {
- const l = logger || log;
- const trimmed = (raw || "").trim();
- if (!trimmed) {
- l.debug("posttooluse-validate-skip", { reason: "stdin_empty" });
- return null;
- }
- let input;
- try {
- input = JSON.parse(trimmed);
- } catch {
- l.debug("posttooluse-validate-skip", { reason: "stdin_parse_fail" });
- return null;
- }
- const toolName = input.tool_name || "";
- if (!SUPPORTED_TOOLS.includes(toolName)) {
- l.debug("posttooluse-validate-skip", { reason: "unsupported_tool", toolName });
- return null;
- }
- const toolInput = input.tool_input || {};
- const filePaths = resolveToolFilePaths(toolInput);
- const filePath = filePaths[0] || "";
- if (!filePath) {
- l.debug("posttooluse-validate-skip", { reason: "no_file_path", toolName });
- return null;
- }
- const sessionId = resolveSessionId(input);
- const cwd = resolveHookCwd(input, env);
- const platform = detectPlatform(input);
- l.debug("posttooluse-validate-input", {
- toolName,
- filePath,
- filePathsCount: filePaths.length,
- sessionId,
- cwd,
- platform
- });
- return { toolName, filePath, filePaths, sessionId, cwd, platform };
-}
-function loadValidateRules(pluginRoot, logger) {
- const l = logger || log;
- const skillsDir = join(pluginRoot, "skills");
- const { skills: skillMap } = buildSkillMap(skillsDir);
- const rulesMap = /* @__PURE__ */ new Map();
- const chainMap = /* @__PURE__ */ new Map();
- for (const [slug, config] of Object.entries(skillMap)) {
- if (config.validate && config.validate.length > 0) {
- rulesMap.set(slug, config.validate);
- }
- if (config.chainTo && config.chainTo.length > 0) {
- chainMap.set(slug, config.chainTo);
- }
- }
- if (rulesMap.size === 0 && chainMap.size === 0) {
- l.debug("posttooluse-validate-skip", { reason: "no_validate_rules" });
- return null;
- }
- const compiledSkills = compileSkillPatterns(skillMap);
- l.debug("posttooluse-validate-loaded", {
- totalSkills: Object.keys(skillMap).length,
- skillsWithRules: rulesMap.size,
- skillsWithChainTo: chainMap.size
- });
- return { skillMap, compiledSkills, rulesMap, chainMap };
-}
-function matchFileToSkills(filePath, fileContent, compiledSkills, rulesMap, logger, chainMap) {
- const l = logger || log;
- const matched = [];
- for (const entry of compiledSkills) {
- if (!rulesMap.has(entry.skill) && !chainMap?.has(entry.skill)) continue;
- const pathMatch = matchPathWithReason(filePath, entry.compiledPaths);
- if (pathMatch) {
- matched.push(entry.skill);
- l.trace("posttooluse-validate-match", {
- skill: entry.skill,
- matchType: "path",
- pattern: pathMatch.pattern
- });
- continue;
- }
- const importMatch = matchImportWithReason(fileContent, entry.compiledImports);
- if (importMatch) {
- matched.push(entry.skill);
- l.trace("posttooluse-validate-match", {
- skill: entry.skill,
- matchType: "import",
- pattern: importMatch.pattern
- });
- }
- }
- l.debug("posttooluse-validate-matched", { matchedSkills: matched });
- return matched;
-}
-function runValidation(fileContent, matchedSkills, rulesMap, logger, filePath) {
- const l = logger || log;
- const violations = [];
- const lines = fileContent.split("\n");
- for (const skill of matchedSkills) {
- const rules = rulesMap.get(skill);
- if (!rules) continue;
- for (const rule of rules) {
- const ruleId = validationRuleId(skill, rule);
- if (rule.skipIfFileContains) {
- try {
- if (new RegExp(rule.skipIfFileContains, "m").test(fileContent)) {
- l.trace("posttooluse-validate-rule-skip", {
- skill,
- pattern: rule.pattern,
- reason: "skipIfFileContains matched"
- });
- continue;
- }
- } catch {
- }
- }
- let regex;
- try {
- regex = new RegExp(rule.pattern, "g");
- } catch {
- l.debug("posttooluse-validate-regex-fail", {
- skill,
- pattern: rule.pattern
- });
- continue;
- }
- for (let i = 0; i < lines.length; i++) {
- regex.lastIndex = 0;
- const match = regex.exec(lines[i]);
- if (match) {
- violations.push({
- skill,
- line: i + 1,
- message: rule.message,
- severity: rule.severity,
- matchedText: match[0].slice(0, 80),
- filePath,
- ruleId,
- upgradeToSkill: rule.upgradeToSkill,
- upgradeWhy: rule.upgradeWhy,
- upgradeMode: rule.upgradeMode ?? (rule.upgradeToSkill ? "soft" : void 0)
- });
- }
- }
- }
- }
- l.debug("posttooluse-validate-violations", {
- total: violations.length,
- errors: violations.filter((v) => v.severity === "error").length,
- recommended: violations.filter((v) => v.severity === "recommended").length,
- warns: violations.filter((v) => v.severity === "warn").length
- });
- return violations;
-}
-function runChainInjection(fileContent, matchedSkills, chainMap, sessionId, pluginRoot, logger, env = process.env) {
- const l = logger || log;
- const result = { injected: [], totalBytes: 0 };
- const chainCap = Math.max(1, parseInt(env.VERCEL_PLUGIN_CHAIN_CAP || "", 10) || DEFAULT_CHAIN_CAP);
- const candidates = [];
- for (const skill of matchedSkills) {
- const rules = chainMap.get(skill);
- if (!rules) continue;
- for (const rule of rules) {
- if (rule.skipIfFileContains) {
- try {
- if (new RegExp(rule.skipIfFileContains, "m").test(fileContent)) {
- l.debug("posttooluse-chain-skip-contains", {
- skill,
- targetSkill: rule.targetSkill,
- reason: "skipIfFileContains matched"
- });
- continue;
- }
- } catch {
- }
- }
- try {
- const regex = new RegExp(rule.pattern, "m");
- if (regex.test(fileContent)) {
- candidates.push({ sourceSkill: skill, rule });
- }
- } catch {
- l.debug("posttooluse-chain-regex-fail", {
- skill,
- pattern: rule.pattern
- });
- }
- }
- }
- if (candidates.length === 0) return result;
- const seenTargets = /* @__PURE__ */ new Set();
- const uniqueCandidates = candidates.filter(({ rule }) => {
- if (seenTargets.has(rule.targetSkill)) return false;
- seenTargets.add(rule.targetSkill);
- return true;
- });
- const fileSeen = sessionId ? readSessionFile(sessionId, "seen-skills") : "";
- const seenSet = new Set(fileSeen.split(",").filter(Boolean));
- for (const { sourceSkill, rule } of uniqueCandidates) {
- if (result.injected.length >= chainCap) {
- l.debug("posttooluse-chain-cap-reached", {
- cap: chainCap,
- remaining: uniqueCandidates.length - result.injected.length
- });
- break;
- }
- if (seenSet.has(rule.targetSkill)) {
- l.debug("posttooluse-chain-skip-dedup", {
- sourceSkill,
- targetSkill: rule.targetSkill
- });
- continue;
- }
- const skillPath = join(pluginRoot, "skills", rule.targetSkill, "SKILL.md");
- const skillContent = safeReadFile(skillPath);
- if (!skillContent) {
- l.debug("posttooluse-chain-skip-missing", {
- sourceSkill,
- targetSkill: rule.targetSkill,
- path: skillPath
- });
- continue;
- }
- const { body } = extractFrontmatter(skillContent);
- const trimmedBody = body.trim();
- if (!trimmedBody) continue;
- const bytes = Buffer.byteLength(trimmedBody, "utf-8");
- if (result.totalBytes + bytes > CHAIN_BUDGET_BYTES) {
- l.debug("posttooluse-chain-budget-exceeded", {
- sourceSkill,
- targetSkill: rule.targetSkill,
- bytes,
- totalBytes: result.totalBytes,
- budget: CHAIN_BUDGET_BYTES
- });
- break;
- }
- if (sessionId) {
- const claimed = tryClaimSessionKey(sessionId, "seen-skills", rule.targetSkill);
- if (!claimed) {
- l.debug("posttooluse-chain-skip-concurrent-claim", {
- sourceSkill,
- targetSkill: rule.targetSkill
- });
- seenSet.add(rule.targetSkill);
- continue;
- }
- syncSessionFileFromClaims(sessionId, "seen-skills");
- }
- seenSet.add(rule.targetSkill);
- result.injected.push({
- sourceSkill,
- targetSkill: rule.targetSkill,
- message: rule.message,
- content: trimmedBody
- });
- result.totalBytes += bytes;
- l.debug("posttooluse-chain-injected", {
- sourceSkill,
- targetSkill: rule.targetSkill,
- bytes,
- totalBytes: result.totalBytes
- });
- }
- if (result.injected.length > 0) {
- l.summary("posttooluse-chain-result", {
- injectedCount: result.injected.length,
- totalBytes: result.totalBytes,
- targets: result.injected.map((i) => i.targetSkill)
- });
- }
- return result;
-}
-function contentHash(content) {
- return createHash("md5").update(content).digest("hex").slice(0, 12);
-}
-function parseValidatedFiles(envValue) {
- if (typeof envValue !== "string" || envValue.trim() === "") {
- return /* @__PURE__ */ new Set();
- }
- const set = /* @__PURE__ */ new Set();
- for (const part of envValue.split(",")) {
- const trimmed = part.trim();
- if (trimmed !== "") set.add(trimmed);
- }
- return set;
-}
-function appendValidatedFile(envValue, entry) {
- const current = typeof envValue === "string" ? envValue.trim() : "";
- return current === "" ? entry : `${current},${entry}`;
-}
-function isAlreadyValidated(filePath, hash, sessionId) {
- const entry = `${filePath}:${hash}`;
- const validated = parseValidatedFiles(process.env.VERCEL_PLUGIN_VALIDATED_FILES);
- if (validated.has(entry)) {
- return true;
- }
- if (!sessionId) {
- return false;
- }
- const persisted = parseValidatedFiles(readSessionFile(sessionId, "validated-files"));
- return persisted.has(entry);
-}
-function markValidated(filePath, hash, sessionId) {
- const entry = `${filePath}:${hash}`;
- const persistedState = sessionId ? readSessionFile(sessionId, "validated-files") : "";
- const current = process.env[VALIDATED_FILES_ENV_KEY] || persistedState;
- const next = appendValidatedFile(current, entry);
- process.env[VALIDATED_FILES_ENV_KEY] = next;
- if (sessionId) {
- writeSessionFile(sessionId, "validated-files", next);
- }
- return next;
-}
-function formatOutput(violations, matchedSkills, filePath, logger, platform = "claude-code", env, chainResult) {
- const l = logger || log;
- const hasChains = chainResult && chainResult.injected.length > 0;
- if (violations.length === 0 && !hasChains) {
- l.debug("posttooluse-validate-no-output", { reason: "no_actionable_violations" });
- return "{}";
- }
- const errors = violations.filter((v) => v.severity === "error");
- const recommended = violations.filter((v) => v.severity === "recommended");
- const warns = violations.filter((v) => v.severity === "warn");
- const hasErrors = errors.length > 0;
- const hasRecommended = recommended.length > 0;
- const hasWarns = warns.length > 0;
- const bySkill = /* @__PURE__ */ new Map();
- for (const v of violations) {
- if (!bySkill.has(v.skill)) bySkill.set(v.skill, []);
- bySkill.get(v.skill).push(v);
- }
- const emittedUpgradeSkills = /* @__PURE__ */ new Set();
- const formatViolationLine = (violation, label) => {
- const lines = [`- Line ${violation.line} [${label}]: ${violation.message}`];
- if (violation.upgradeToSkill && !emittedUpgradeSkills.has(violation.upgradeToSkill)) {
- emittedUpgradeSkills.add(violation.upgradeToSkill);
- const reason = violation.upgradeWhy ? ` Reason: ${violation.upgradeWhy}` : "";
- const prefix = violation.upgradeMode === "hard" ? "REQUIRED: " : "";
- lines.push("");
- lines.push(`${prefix}Use the Skill tool now to load ${violation.upgradeToSkill}.${reason}`);
- lines.push(
- ``
- );
- }
- return lines.join("\n");
- };
- const parts = [];
- for (const [skill, skillViolations] of bySkill) {
- const errorLines = skillViolations.filter((v) => v.severity === "error").map((v) => formatViolationLine(v, "ERROR"));
- const recommendedLines = skillViolations.filter((v) => v.severity === "recommended").map((v) => formatViolationLine(v, "RECOMMENDED"));
- const warnLines = skillViolations.filter((v) => v.severity === "warn").map((v) => formatViolationLine(v, "SUGGESTION"));
- parts.push([...errorLines, ...recommendedLines, ...warnLines].join("\n"));
- }
- const skillList = [...bySkill.keys()].join(", ");
- const counts = [
- hasErrors ? `${errors.length} error${errors.length > 1 ? "s" : ""}` : "",
- hasRecommended ? `${recommended.length} recommendation${recommended.length > 1 ? "s" : ""}` : "",
- hasWarns ? `${warns.length} suggestion${warns.length > 1 ? "s" : ""}` : ""
- ].filter(Boolean).join(", ");
- const callToAction = hasErrors ? `Please fix these issues before proceeding.` : hasRecommended ? `Apply these recommendations before continuing \u2014 they reflect current best practices.` : `Consider applying these suggestions to follow best practices.`;
- const contextParts = [];
- if (violations.length > 0) {
- contextParts.push(
- ``,
- `VALIDATION (${counts}) for \`${filePath}\`:`,
- ...parts,
- callToAction,
- ``
- );
- }
- if (hasChains) {
- for (const chain of chainResult.injected) {
- const reason = chain.message ? ` ${chain.message}` : "";
- contextParts.push(
- ``,
- `**Skill context auto-loaded** (${chain.targetSkill}):${reason}`,
- "",
- chain.content,
- ``
- );
- }
- }
- const context = contextParts.join("\n");
- const chainedSkills = hasChains ? chainResult.injected.map((c) => c.targetSkill) : [];
- const metadata = {
- version: 1,
- hook: "posttooluse-validate",
- filePath,
- matchedSkills,
- errorCount: errors.length,
- recommendedCount: recommended.length,
- warnCount: warns.length,
- chainedSkills
- };
- const metaComment = ``;
- l.summary("posttooluse-validate-output", {
- filePath,
- matchedSkills,
- errorCount: errors.length,
- recommendedCount: recommended.length,
- warnCount: warns.length,
- chainedSkills
- });
- return formatPlatformOutput(platform, context + "\n" + metaComment, env);
-}
-function run() {
- const timing = {};
- const tStart = log.active ? log.now() : 0;
- let raw;
- try {
- raw = readFileSync(0, "utf-8");
- } catch {
- return "{}";
- }
- const parsed = parseInput(raw, log);
- if (!parsed) return "{}";
- if (log.active) timing.parse = Math.round(log.now() - tStart);
- const { toolName, filePath, sessionId, cwd, platform } = parsed;
- const resolvedPath = cwd ? resolve(cwd, filePath) : filePath;
- const fileContent = safeReadFile(resolvedPath);
- if (!fileContent) {
- log.debug("posttooluse-validate-skip", { reason: "file_unreadable", filePath: resolvedPath });
- return "{}";
- }
- const hash = contentHash(fileContent);
- if (isAlreadyValidated(filePath, hash, sessionId)) {
- log.debug("posttooluse-validate-skip", { reason: "already_validated", filePath, hash });
- return "{}";
- }
- const tLoad = log.active ? log.now() : 0;
- const data = loadValidateRules(PLUGIN_ROOT, log);
- if (!data) return "{}";
- if (log.active) timing.load = Math.round(log.now() - tLoad);
- const { compiledSkills, rulesMap, chainMap } = data;
- const tMatch = log.active ? log.now() : 0;
- const matchedSkills = matchFileToSkills(filePath, fileContent, compiledSkills, rulesMap, log, chainMap);
- if (log.active) timing.match = Math.round(log.now() - tMatch);
- if (matchedSkills.length === 0) {
- log.debug("posttooluse-validate-skip", { reason: "no_skill_match", filePath });
- markValidated(filePath, hash, sessionId);
- return "{}";
- }
- const tValidate = log.active ? log.now() : 0;
- const violations = runValidation(fileContent, matchedSkills, rulesMap, log);
- if (log.active) timing.validate = Math.round(log.now() - tValidate);
- const tChain = log.active ? log.now() : 0;
- const chainResult = runChainInjection(
- fileContent,
- matchedSkills,
- chainMap,
- sessionId,
- PLUGIN_ROOT,
- log
- );
- if (log.active) timing.chain = Math.round(log.now() - tChain);
- const validatedFiles = markValidated(filePath, hash, sessionId);
- const hasOutput = violations.length > 0 || chainResult.injected.length > 0;
- const cursorEnv = platform === "cursor" && hasOutput ? { [VALIDATED_FILES_ENV_KEY]: validatedFiles } : void 0;
- const result = formatOutput(violations, matchedSkills, filePath, log, platform, cursorEnv, chainResult);
- log.complete("posttooluse-validate-done", {
- matchedCount: matchedSkills.length,
- injectedCount: violations.filter((v) => v.severity === "error").length
- }, timing);
- return result;
-}
-function isMainModule() {
- try {
- const scriptPath = realpathSync(resolve(process.argv[1] || ""));
- const modulePath = realpathSync(fileURLToPath(import.meta.url));
- return scriptPath === modulePath;
- } catch {
- return false;
- }
-}
-if (isMainModule()) {
- try {
- const output = run();
- process.stdout.write(output);
- } catch (err) {
- const entry = [
- `[${(/* @__PURE__ */ new Date()).toISOString()}] CRASH in posttooluse-validate.mts`,
- ` error: ${err?.message || String(err)}`,
- ` stack: ${err?.stack || "(no stack)"}`,
- ` PLUGIN_ROOT: ${PLUGIN_ROOT}`,
- ""
- ].join("\n");
- process.stderr.write(entry);
- process.stdout.write("{}");
- }
-}
-export {
- appendValidatedFile,
- contentHash,
- formatOutput,
- isAlreadyValidated,
- loadValidateRules,
- markValidated,
- matchFileToSkills,
- parseInput,
- parseValidatedFiles,
- run,
- runChainInjection,
- runValidation
-};
diff --git a/hooks/posttooluse-verification-observe.mjs b/hooks/posttooluse-verification-observe.mjs
deleted file mode 100755
index 23cc27a..0000000
--- a/hooks/posttooluse-verification-observe.mjs
+++ /dev/null
@@ -1,155 +0,0 @@
-#!/usr/bin/env node
-
-// hooks/src/posttooluse-verification-observe.mts
-import { readFileSync, realpathSync } from "fs";
-import { resolve } from "path";
-import { fileURLToPath } from "url";
-import { generateVerificationId } from "./hook-env.mjs";
-import { createLogger } from "./logger.mjs";
-function isVerificationReport(value) {
- if (typeof value !== "object" || value === null) return false;
- const obj = value;
- return obj.type === "verification.report/v1" && typeof obj.verificationId === "string" && Array.isArray(obj.boundaries) && obj.boundaries.every(
- (b) => typeof b === "object" && b !== null && b.event === "verification.boundary_observed"
- );
-}
-var BOUNDARY_PATTERNS = [
- // uiRender: browser/screenshot/playwright/puppeteer commands
- { boundary: "uiRender", pattern: /\b(open|launch|browse|screenshot|puppeteer|playwright|chromium|firefox|webkit)\b/i, label: "browser-tool" },
- { boundary: "uiRender", pattern: /\bopen\s+https?:/i, label: "open-url" },
- { boundary: "uiRender", pattern: /\bnpx\s+playwright\b/i, label: "playwright-cli" },
- // clientRequest: curl, fetch, wget, httpie
- { boundary: "clientRequest", pattern: /\b(curl|wget|http|httpie)\b/i, label: "http-client" },
- { boundary: "clientRequest", pattern: /\bfetch\s*\(/i, label: "fetch-call" },
- { boundary: "clientRequest", pattern: /\bnpx\s+undici\b/i, label: "undici-cli" },
- // serverHandler: log tailing, server process inspection
- { boundary: "serverHandler", pattern: /\b(tail|less|cat)\b.*\.(log|out|err)\b/i, label: "log-tail" },
- { boundary: "serverHandler", pattern: /\b(tail\s+-f|journalctl\s+-f)\b/i, label: "log-follow" },
- { boundary: "serverHandler", pattern: /\blog(s)?\s/i, label: "log-command" },
- { boundary: "serverHandler", pattern: /\b(vercel\s+logs|vercel\s+inspect)\b/i, label: "vercel-logs" },
- { boundary: "serverHandler", pattern: /\b(lsof|netstat|ss)\s.*:(3000|3001|4000|5173|8080)\b/i, label: "port-inspect" },
- // environment: env reads, config inspection
- { boundary: "environment", pattern: /\b(printenv|env\b|echo\s+\$)/i, label: "env-read" },
- { boundary: "environment", pattern: /\bvercel\s+env\b/i, label: "vercel-env" },
- { boundary: "environment", pattern: /\bcat\b.*\.env\b/i, label: "dotenv-read" },
- { boundary: "environment", pattern: /\bnode\s+-e\b.*process\.env\b/i, label: "node-env" }
-];
-function classifyBoundary(command) {
- for (const bp of BOUNDARY_PATTERNS) {
- if (bp.pattern.test(command)) {
- return { boundary: bp.boundary, matchedPattern: bp.label };
- }
- }
- return { boundary: "unknown", matchedPattern: "none" };
-}
-var ROUTE_REGEX = /\b(?:app|pages|src\/pages|src\/app)\/([\w[\].-]+(?:\/[\w[\].-]+)*)/;
-var URL_ROUTE_REGEX = /https?:\/\/[^/\s]+(\/([\w-]+(?:\/[\w-]+)*))/;
-function inferRoute(command, recentEdits) {
- if (recentEdits) {
- const paths = recentEdits.split(",").map((p) => p.trim()).filter(Boolean);
- for (const p of paths) {
- const match = ROUTE_REGEX.exec(p);
- if (match) {
- const route = "/" + match[1].replace(/\/page\.\w+$/, "").replace(/\/route\.\w+$/, "").replace(/\/layout\.\w+$/, "").replace(/\/loading\.\w+$/, "").replace(/\/error\.\w+$/, "").replace(/\[([^\]]+)\]/g, ":$1");
- return route === "/" ? "/" : route.replace(/\/$/, "");
- }
- }
- }
- const urlMatch = URL_ROUTE_REGEX.exec(command);
- if (urlMatch && urlMatch[1]) {
- return urlMatch[1];
- }
- return null;
-}
-function parseInput(raw, logger) {
- const trimmed = (raw || "").trim();
- if (!trimmed) return null;
- let input;
- try {
- input = JSON.parse(trimmed);
- } catch {
- return null;
- }
- const toolName = input.tool_name || "";
- if (toolName !== "Bash") return null;
- const toolInput = input.tool_input || {};
- const command = toolInput.command || "";
- if (!command) return null;
- const sessionId = input.session_id || null;
- const cwdCandidate = input.cwd ?? input.working_directory;
- const cwd = typeof cwdCandidate === "string" && cwdCandidate.trim() !== "" ? cwdCandidate : null;
- return { command, sessionId, cwd };
-}
-function run(rawInput) {
- const log = createLogger();
- let raw;
- if (rawInput !== void 0) {
- raw = rawInput;
- } else {
- try {
- raw = readFileSync(0, "utf-8");
- } catch {
- return "{}";
- }
- }
- const parsed = parseInput(raw, log);
- if (!parsed) {
- log.debug("verification-observe-skip", { reason: "no_bash_input" });
- return "{}";
- }
- const { command, sessionId } = parsed;
- const { boundary, matchedPattern } = classifyBoundary(command);
- if (boundary === "unknown") {
- log.trace("verification-observe-skip", { reason: "no_boundary_match", command: command.slice(0, 120) });
- return "{}";
- }
- const verificationId = generateVerificationId();
- const recentEdits = process.env.VERCEL_PLUGIN_RECENT_EDITS || "";
- const inferredRoute = inferRoute(command, recentEdits);
- const boundaryEvent = {
- event: "verification.boundary_observed",
- boundary,
- verificationId,
- command: command.slice(0, 200),
- matchedPattern,
- inferredRoute,
- timestamp: (/* @__PURE__ */ new Date()).toISOString()
- };
- log.summary("verification.boundary_observed", boundaryEvent);
- log.complete("verification-observe-done", {
- matchedCount: 1,
- injectedCount: 0
- });
- return "{}";
-}
-function isMainModule() {
- try {
- const scriptPath = realpathSync(resolve(process.argv[1] || ""));
- const modulePath = realpathSync(fileURLToPath(import.meta.url));
- return scriptPath === modulePath;
- } catch {
- return false;
- }
-}
-if (isMainModule()) {
- try {
- const output = run();
- process.stdout.write(output);
- } catch (err) {
- const entry = [
- `[${(/* @__PURE__ */ new Date()).toISOString()}] CRASH in posttooluse-verification-observe.mts`,
- ` error: ${err?.message || String(err)}`,
- ` stack: ${err?.stack || "(no stack)"}`,
- ""
- ].join("\n");
- process.stderr.write(entry);
- process.stdout.write("{}");
- }
-}
-export {
- classifyBoundary,
- inferRoute,
- isVerificationReport,
- parseInput,
- run
-};
diff --git a/hooks/pretooluse-skill-inject.mjs b/hooks/pretooluse-skill-inject.mjs
index c9ebf86..756b040 100644
--- a/hooks/pretooluse-skill-inject.mjs
+++ b/hooks/pretooluse-skill-inject.mjs
@@ -33,7 +33,6 @@ import {
} from "./patterns.mjs";
import { resolveVercelJsonSkills, isVercelJsonPath, VERCEL_JSON_SKILLS } from "./vercel-config.mjs";
import { createLogger, logDecision } from "./logger.mjs";
-import { trackBaseEvents } from "./telemetry.mjs";
import { selectManagedContextChunk } from "./vercel-context.mjs";
var MAX_SKILLS = 3;
var DEFAULT_INJECTION_BUDGET_BYTES = 18e3;
@@ -604,13 +603,6 @@ function run() {
if (log.active) timing.stdin_parse = Math.round(log.now() - tPhase);
const { toolName, toolInput, sessionId, cwd, platform, toolTarget, scopeId } = parsed;
const runtimeEnvBefore = captureRuntimeEnvSnapshot();
- if (sessionId) {
- const toolEntries = [
- { key: "tool_call:tool_name", value: toolName }
- ];
- trackBaseEvents(sessionId, toolEntries).catch(() => {
- });
- }
const tSkillmap = log.active ? log.now() : 0;
const skills = loadSkills(PLUGIN_ROOT, log);
if (!skills) return "{}";
@@ -801,21 +793,6 @@ function run() {
droppedByCap,
droppedByBudget
}, cwd);
- if (sessionId) {
- const telemetryEntries = [];
- for (const skill of loaded) {
- const reason = matchReasons?.[skill];
- telemetryEntries.push(
- { key: "skill:injected", value: skill },
- { key: "skill:hook", value: "PreToolUse" },
- { key: "skill:priority", value: "0" },
- { key: "skill:match_type", value: reason?.matchType ?? "unknown" },
- { key: "skill:tool_name", value: toolName }
- );
- }
- trackBaseEvents(sessionId, telemetryEntries).catch(() => {
- });
- }
}
return result;
}
diff --git a/hooks/pretooluse-subagent-spawn-observe.mjs b/hooks/pretooluse-subagent-spawn-observe.mjs
deleted file mode 100644
index 6f1cade..0000000
--- a/hooks/pretooluse-subagent-spawn-observe.mjs
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env node
-
-// hooks/src/pretooluse-subagent-spawn-observe.mts
-import { readFileSync } from "fs";
-import { resolve } from "path";
-import { fileURLToPath } from "url";
-import { appendPendingLaunch } from "./subagent-state.mjs";
-import { createLogger, logCaughtError } from "./logger.mjs";
-var log = createLogger();
-var EMPTY_OUTPUT = "{}";
-function isRecord(value) {
- return typeof value === "object" && value !== null && !Array.isArray(value);
-}
-function resolveSessionId(input, env) {
- if (typeof input.session_id === "string" && input.session_id.trim() !== "") {
- return input.session_id;
- }
- if (typeof env.SESSION_ID === "string" && env.SESSION_ID.trim() !== "") {
- return env.SESSION_ID;
- }
- return null;
-}
-function parseInput(raw, env = process.env) {
- const trimmed = (raw || "").trim();
- if (!trimmed) {
- log.debug("pretooluse-subagent-spawn-observe-skip", { reason: "stdin_empty" });
- return null;
- }
- let input;
- try {
- const parsed = JSON.parse(trimmed);
- if (!isRecord(parsed)) {
- log.debug("pretooluse-subagent-spawn-observe-skip", { reason: "stdin_not_object" });
- return null;
- }
- input = parsed;
- } catch {
- log.debug("pretooluse-subagent-spawn-observe-skip", { reason: "stdin_parse_fail" });
- return null;
- }
- const toolName = typeof input.tool_name === "string" ? input.tool_name : "";
- if (toolName !== "Agent") {
- log.debug("pretooluse-subagent-spawn-observe-skip", { reason: "unsupported_tool", toolName });
- return null;
- }
- const sessionId = resolveSessionId(input, env);
- if (!sessionId) {
- log.debug("pretooluse-subagent-spawn-observe-skip", { reason: "missing_session_id" });
- return null;
- }
- const toolInput = isRecord(input.tool_input) ? input.tool_input : {};
- return { sessionId, toolInput };
-}
-function buildPendingLaunchRecord(toolInput, createdAt) {
- const resume = typeof toolInput.resume === "string" ? toolInput.resume : void 0;
- const name = typeof toolInput.name === "string" ? toolInput.name : void 0;
- const pendingLaunch = {
- description: typeof toolInput.description === "string" ? toolInput.description : "",
- prompt: typeof toolInput.prompt === "string" ? toolInput.prompt : "",
- subagent_type: typeof toolInput.subagent_type === "string" ? toolInput.subagent_type : "",
- createdAt,
- ...resume !== void 0 ? { resume } : {},
- ...name !== void 0 ? { name } : {}
- };
- return pendingLaunch;
-}
-function writePendingLaunchRecord(sessionId, toolInput) {
- const createdAt = Date.now();
- const payload = buildPendingLaunchRecord(toolInput, createdAt);
- appendPendingLaunch(sessionId, payload);
- log.debug("pretooluse-subagent-spawn-observe-recorded", {
- sessionId,
- subagentType: typeof payload.subagent_type === "string" ? payload.subagent_type : null,
- name: typeof payload.name === "string" ? payload.name : null
- });
- return sessionId;
-}
-function run(rawInput) {
- let raw = rawInput;
- if (raw === void 0) {
- try {
- raw = readFileSync(0, "utf-8");
- } catch {
- return EMPTY_OUTPUT;
- }
- }
- const parsed = parseInput(raw);
- if (!parsed) {
- return EMPTY_OUTPUT;
- }
- try {
- writePendingLaunchRecord(parsed.sessionId, parsed.toolInput);
- } catch (error) {
- logCaughtError(log, "pretooluse-subagent-spawn-observe-write-failed", error, {
- attempted: "write_pending_launch_record",
- sessionId: parsed.sessionId,
- state: "launch_observation_failed"
- });
- }
- return EMPTY_OUTPUT;
-}
-function isMainModule() {
- const entrypoint = fileURLToPath(import.meta.url);
- return process.argv[1] ? resolve(process.argv[1]) === entrypoint : false;
-}
-if (isMainModule()) {
- process.stdout.write(run());
-}
-export {
- buildPendingLaunchRecord,
- parseInput,
- run,
- writePendingLaunchRecord
-};
diff --git a/hooks/session-end-cleanup.mjs b/hooks/session-end-cleanup.mjs
index 5ac02f1..6845cd5 100755
--- a/hooks/session-end-cleanup.mjs
+++ b/hooks/session-end-cleanup.mjs
@@ -2,8 +2,8 @@
// hooks/src/session-end-cleanup.mts
import { createHash } from "crypto";
-import { readdirSync, readFileSync, rmSync, unlinkSync, writeFileSync } from "fs";
-import { homedir, tmpdir } from "os";
+import { readdirSync, readFileSync, rmSync, unlinkSync } from "fs";
+import { tmpdir } from "os";
import { join, resolve } from "path";
import { fileURLToPath } from "url";
var SAFE_SESSION_ID_RE = /^[a-zA-Z0-9_-]+$/;
@@ -42,14 +42,6 @@ function parseSessionIdFromStdin() {
return normalizeSessionEndSessionId(parseSessionEndHookInput(readFileSync(0, "utf8")));
}
function main() {
- try {
- const prefPath = join(homedir(), ".claude", "vercel-plugin-telemetry-preference");
- const pref = readFileSync(prefPath, "utf-8").trim();
- if (pref === "asked") {
- writeFileSync(prefPath, "disabled");
- }
- } catch {
- }
const sessionId = parseSessionIdFromStdin();
if (sessionId === null) {
process.exit(0);
@@ -63,7 +55,7 @@ function main() {
}
for (const entry of entries) {
const fullPath = join(tempRoot, entry);
- if (entry.endsWith(".d") || entry.endsWith("-pending-launches")) {
+ if (entry.endsWith(".d")) {
removeDirIfPresent(fullPath);
} else {
removeFileIfPresent(fullPath);
diff --git a/hooks/session-start-activation.mjs b/hooks/session-start-activation.mjs
new file mode 100644
index 0000000..d114c8b
--- /dev/null
+++ b/hooks/session-start-activation.mjs
@@ -0,0 +1,55 @@
+// hooks/src/session-start-activation.mts
+import { existsSync, readdirSync } from "fs";
+import { join } from "path";
+import { safeReadJson } from "./hook-env.mjs";
+var ACTIVATION_MARKER_FILES = [
+ "vercel.json",
+ "next.config.js",
+ "next.config.mjs",
+ "next.config.ts",
+ "next.config.mts"
+];
+function readPackageJson(projectRoot) {
+ return safeReadJson(join(projectRoot, "package.json"));
+}
+function packageJsonSignalsVercel(projectRoot) {
+ const pkg = readPackageJson(projectRoot);
+ if (!pkg) return false;
+ const allDeps = {
+ ...pkg.dependencies || {},
+ ...pkg.devDependencies || {}
+ };
+ if (Object.keys(allDeps).some(
+ (dep) => dep === "next" || dep === "vercel" || dep.startsWith("@vercel/")
+ )) {
+ return true;
+ }
+ const scripts = pkg.scripts && typeof pkg.scripts === "object" ? pkg.scripts : {};
+ return Object.values(scripts).some(
+ (value) => typeof value === "string" && /\bvercel\b/.test(value)
+ );
+}
+function hasSessionStartActivationMarkers(projectRoot) {
+ if (ACTIVATION_MARKER_FILES.some((file) => existsSync(join(projectRoot, file)))) {
+ return true;
+ }
+ if (existsSync(join(projectRoot, ".vercel"))) {
+ return true;
+ }
+ return packageJsonSignalsVercel(projectRoot);
+}
+function isGreenfieldDirectory(projectRoot) {
+ let dirents;
+ try {
+ dirents = readdirSync(projectRoot, { withFileTypes: true });
+ } catch {
+ return false;
+ }
+ const hasNonDotDir = dirents.some((d) => !d.name.startsWith("."));
+ const hasDotFile = dirents.some((d) => d.name.startsWith(".") && d.isFile());
+ return !hasNonDotDir && !hasDotFile;
+}
+export {
+ hasSessionStartActivationMarkers,
+ isGreenfieldDirectory
+};
diff --git a/hooks/session-start-profiler.mjs b/hooks/session-start-profiler.mjs
index 92123db..3f4c7b2 100644
--- a/hooks/session-start-profiler.mjs
+++ b/hooks/session-start-profiler.mjs
@@ -4,8 +4,7 @@ import {
constants as fsConstants,
existsSync,
readFileSync,
- readdirSync,
- writeFileSync
+ readdirSync
} from "fs";
import { delimiter, join, resolve } from "path";
import { execFileSync } from "child_process";
@@ -15,10 +14,11 @@ import {
normalizeInput,
setSessionEnv
} from "./compat.mjs";
-import { pluginRoot, profileCachePath, safeReadJson, writeSessionFile } from "./hook-env.mjs";
+import { pluginRoot, safeReadJson, writeSessionFile } from "./hook-env.mjs";
import { createLogger, logCaughtError } from "./logger.mjs";
+import { hasSessionStartActivationMarkers } from "./session-start-activation.mjs";
import { buildSkillMap } from "./skill-map-frontmatter.mjs";
-import { trackBaseEvents, getOrCreateDeviceId } from "./telemetry.mjs";
+import { trackDauActiveToday } from "./telemetry.mjs";
var FILE_MARKERS = [
{ file: "next.config.js", skills: ["nextjs", "turbopack"] },
{ file: "next.config.mjs", skills: ["nextjs", "turbopack"] },
@@ -409,8 +409,25 @@ async function main() {
const platform = detectSessionStartPlatform(hookInput);
const sessionId = normalizeSessionStartSessionId(hookInput);
const projectRoot = resolveSessionStartProjectRoot();
- logBrokenSkillFrontmatterSummary();
const greenfield = checkGreenfield(projectRoot);
+ const shouldActivate = greenfield !== null || !existsSync(projectRoot) || hasSessionStartActivationMarkers(projectRoot);
+ if (!shouldActivate) {
+ log.debug("session-start-profiler:skipped-non-vercel-project", {
+ projectRoot,
+ reason: "non-empty-without-vercel-markers"
+ });
+ if (sessionId) {
+ writeSessionFile(sessionId, SESSION_GREENFIELD_KIND, "");
+ writeSessionFile(sessionId, SESSION_LIKELY_SKILLS_KIND, "");
+ }
+ if (platform === "cursor") {
+ process.stdout.write(JSON.stringify(formatOutput("cursor", {})));
+ }
+ await trackDauActiveToday().catch(() => {
+ });
+ process.exit(0);
+ }
+ logBrokenSkillFrontmatterSummary();
const cliStatus = checkVercelCli();
const userMessages = buildSessionStartProfilerUserMessages(greenfield, cliStatus);
const likelySkills = greenfield ? GREENFIELD_DEFAULT_SKILLS : profileProject(projectRoot);
@@ -449,37 +466,8 @@ async function main() {
`);
}
- if (sessionId) {
- try {
- const cache = {
- projectRoot,
- likelySkills,
- greenfield: greenfield !== null,
- bootstrapHints: setupSignals.bootstrapHints,
- resourceHints: setupSignals.resourceHints,
- setupMode: setupSignals.setupMode,
- timestamp: (/* @__PURE__ */ new Date()).toISOString()
- };
- writeFileSync(profileCachePath(sessionId), JSON.stringify(cache), "utf-8");
- } catch (error) {
- logCaughtError(log, "session-start-profiler:write-profile-cache-failed", error, {
- sessionId,
- projectRoot
- });
- }
- }
- if (sessionId) {
- const deviceId = getOrCreateDeviceId();
- await trackBaseEvents(sessionId, [
- { key: "session:device_id", value: deviceId },
- { key: "session:platform", value: process.platform },
- { key: "session:likely_skills", value: likelySkills.join(",") },
- { key: "session:greenfield", value: String(greenfield !== null) },
- { key: "session:vercel_cli_installed", value: String(cliStatus.installed) },
- { key: "session:vercel_cli_version", value: cliStatus.currentVersion || "" }
- ]).catch(() => {
- });
- }
+ await trackDauActiveToday().catch(() => {
+ });
if (cursorOutput) {
process.stdout.write(cursorOutput);
}
diff --git a/hooks/setup-telemetry.mjs b/hooks/setup-telemetry.mjs
index 1ba166d..31b714a 100755
--- a/hooks/setup-telemetry.mjs
+++ b/hooks/setup-telemetry.mjs
@@ -1,39 +1,17 @@
#!/usr/bin/env node
// hooks/src/setup-telemetry.mts
-import { readFileSync } from "fs";
-import { homedir } from "os";
-import { join } from "path";
import { getTelemetryOverride } from "./telemetry.mjs";
-var PREF_PATH = join(homedir(), ".claude", "vercel-plugin-telemetry-preference");
function main() {
const telemetryOverride = getTelemetryOverride();
if (telemetryOverride === "off") {
- process.stdout.write(
- "Telemetry is fully disabled via VERCEL_PLUGIN_TELEMETRY=off.\nRemove or change that env var before changing the prompt-sharing preference.\n"
- );
- process.exit(0);
- }
- let existing = null;
- try {
- existing = readFileSync(PREF_PATH, "utf-8").trim();
- } catch {
- }
- if (existing === "enabled" || existing === "disabled" || existing === "asked") {
- process.stdout.write(
- `Prompt telemetry is currently ${existing}. This setting controls prompt text sharing only.
-To change prompt sharing, edit ~/.claude/vercel-plugin-telemetry-preference. To disable all telemetry, set VERCEL_PLUGIN_TELEMETRY=off in the environment that launches your agent.
-`
- );
+ process.stdout.write("Telemetry is fully disabled via VERCEL_PLUGIN_TELEMETRY=off.\n");
process.exit(0);
}
process.stdout.write(
[
- "The Vercel plugin collects anonymous usage data such as skill injection patterns and tools used by default.",
- "Ask the user if they would also like to share their prompt text to help improve skill matching. This choice only controls prompt telemetry.",
- "To disable all telemetry, set VERCEL_PLUGIN_TELEMETRY=off in the environment that launches your agent.",
- "If they agree, run `echo 'enabled' > ~/.claude/vercel-plugin-telemetry-preference`.",
- "If they decline, run `echo 'disabled' > ~/.claude/vercel-plugin-telemetry-preference`.",
+ "The default telemetry profile is a once-per-day DAU phone-home that sends only dau:active_today.",
+ "To disable all telemetry, set VERCEL_PLUGIN_TELEMETRY=off.",
""
].join("\n")
);
diff --git a/hooks/src/hook-env.mts b/hooks/src/hook-env.mts
index d0fe68f..c620575 100644
--- a/hooks/src/hook-env.mts
+++ b/hooks/src/hook-env.mts
@@ -6,7 +6,7 @@
* try/catch boilerplate.
*/
-import { createHash, randomUUID } from "node:crypto";
+import { createHash } from "node:crypto";
import {
appendFileSync,
closeSync,
@@ -267,30 +267,6 @@ export function removeAllSessionDedupArtifacts(sessionId: string): RemoveArtifac
return result;
}
-// ---------------------------------------------------------------------------
-// Profile cache helpers
-// ---------------------------------------------------------------------------
-
-/**
- * Resolve the path for the session profile cache file.
- * Written by session-start-profiler, read by subagent-start-bootstrap.
- */
-export function profileCachePath(sessionId: string): string {
- return resolveDedupTempPath(sessionId, "profile.json");
-}
-
-// ---------------------------------------------------------------------------
-// Verification ID
-// ---------------------------------------------------------------------------
-
-/**
- * Generate a unique verification ID (UUIDv4) for traceability across
- * dev-server verification injection events.
- */
-export function generateVerificationId(): string {
- return randomUUID();
-}
-
// ---------------------------------------------------------------------------
// Defensive file / JSON readers
// ---------------------------------------------------------------------------
diff --git a/hooks/src/inject-claude-md.mts b/hooks/src/inject-claude-md.mts
index 38fd1ac..b364c76 100644
--- a/hooks/src/inject-claude-md.mts
+++ b/hooks/src/inject-claude-md.mts
@@ -5,11 +5,12 @@
* Cursor receives `{ additional_context: "..." }` JSON on stdout.
*/
-import { readFileSync } from "node:fs";
+import { existsSync, readFileSync } from "node:fs";
import { join, resolve } from "node:path";
import { fileURLToPath } from "node:url";
import { formatOutput, type HookPlatform } from "./compat.mjs";
import { pluginRoot, safeReadFile } from "./hook-env.mjs";
+import { hasSessionStartActivationMarkers, isGreenfieldDirectory } from "./session-start-activation.mjs";
interface InjectClaudeMdInput {
session_id?: string;
@@ -52,6 +53,7 @@ export function buildInjectClaudeMdParts(
content: string | null,
env: NodeJS.ProcessEnv = process.env,
knowledgeUpdate: string | null = null,
+ greenfield = env.VERCEL_PLUGIN_GREENFIELD === "true",
): string[] {
const parts: string[] = [];
@@ -63,7 +65,7 @@ export function buildInjectClaudeMdParts(
parts.push(knowledgeUpdate);
}
- if (env.VERCEL_PLUGIN_GREENFIELD === "true") {
+ if (greenfield) {
parts.push(GREENFIELD_CONTEXT);
}
@@ -78,6 +80,10 @@ export function formatInjectClaudeMdOutput(platform: HookPlatform, content: stri
return content;
}
+function resolveInjectClaudeMdProjectRoot(env: NodeJS.ProcessEnv = process.env): string {
+ return env.CLAUDE_PROJECT_ROOT ?? env.CURSOR_PROJECT_DIR ?? process.cwd();
+}
+
function stripFrontmatter(content: string): string {
const match = content.match(/^---\n[\s\S]*?\n---\n?([\s\S]*)$/);
return match ? match[1].trim() : content.trim();
@@ -86,10 +92,26 @@ function stripFrontmatter(content: string): string {
function main(): void {
const input = parseInjectClaudeMdInput(readFileSync(0, "utf8"));
const platform = detectInjectClaudeMdPlatform(input);
+ const projectRoot = resolveInjectClaudeMdProjectRoot();
+ const isGreenfield = isGreenfieldDirectory(projectRoot);
+ const greenfieldOverride = process.env.VERCEL_PLUGIN_GREENFIELD === "true";
+ const shouldActivate =
+ isGreenfield || greenfieldOverride || !existsSync(projectRoot) || hasSessionStartActivationMarkers(projectRoot);
+ if (!shouldActivate) {
+ if (platform === "cursor") {
+ process.stdout.write(JSON.stringify(formatOutput(platform, {})));
+ }
+ return;
+ }
const thinSessionContext = safeReadFile(join(pluginRoot(), "vercel-session.md"));
const knowledgeUpdateRaw = safeReadFile(join(pluginRoot(), "skills", "knowledge-update", "SKILL.md"));
const knowledgeUpdate = knowledgeUpdateRaw !== null ? stripFrontmatter(knowledgeUpdateRaw) : null;
- const parts = buildInjectClaudeMdParts(thinSessionContext, process.env, knowledgeUpdate);
+ const parts = buildInjectClaudeMdParts(
+ thinSessionContext,
+ process.env,
+ knowledgeUpdate,
+ isGreenfield || greenfieldOverride,
+ );
if (parts.length === 0) {
return;
diff --git a/hooks/src/posttooluse-bash-chain.mts b/hooks/src/posttooluse-bash-chain.mts
deleted file mode 100644
index 2d4d3db..0000000
--- a/hooks/src/posttooluse-bash-chain.mts
+++ /dev/null
@@ -1,550 +0,0 @@
-#!/usr/bin/env node
-/**
- * PostToolUse hook: detects package installations from Bash tool output
- * and chains to the appropriate skill context.
- *
- * Input: JSON on stdin with tool_name, tool_input, session_id
- * Output: JSON on stdout with { hookSpecificOutput: { additionalContext: "..." } } or {}
- *
- * Only fires for Bash tool calls. Parses npm install/yarn add/pnpm add/bun add
- * commands, extracts package names, maps them to skills, and injects skill context.
- *
- * Respects the session-backed dedup contract (atomic claims, seen-skills file).
- */
-
-import type { SyncHookJSONOutput } from "@anthropic-ai/claude-agent-sdk";
-import { readFileSync, realpathSync } from "node:fs";
-import { join, resolve } from "node:path";
-import { fileURLToPath } from "node:url";
-import { detectPlatform, type HookPlatform } from "./compat.mjs";
-import {
- pluginRoot as resolvePluginRoot,
- readSessionFile,
- safeReadFile,
- tryClaimSessionKey,
- syncSessionFileFromClaims,
-} from "./hook-env.mjs";
-import { extractFrontmatter } from "./skill-map-frontmatter.mjs";
-import { createLogger, logCaughtError } from "./logger.mjs";
-import type { Logger } from "./logger.mjs";
-
-const PLUGIN_ROOT = resolvePluginRoot();
-const CHAIN_BUDGET_BYTES = 18_000;
-const DEFAULT_CHAIN_CAP = 2;
-
-// ---------------------------------------------------------------------------
-// Package → Skill mapping
-// ---------------------------------------------------------------------------
-
-/**
- * Maps known npm package names to skill slugs. When a user installs one of
- * these packages, we inject the corresponding skill context so the agent
- * has up-to-date guidance for the Vercel-ecosystem alternative or best practice.
- */
-export const PACKAGE_SKILL_MAP: Record = {
- // Express / traditional Node servers → Vercel Functions
- express: {
- skill: "vercel-functions",
- message: "Express.js detected — Vercel uses Web API route handlers, not Express. Loading Vercel Functions guidance.",
- },
- fastify: {
- skill: "vercel-functions",
- message: "Fastify detected — consider Vercel Functions with Web Request/Response API for serverless deployment.",
- },
- koa: {
- skill: "vercel-functions",
- message: "Koa detected — consider Vercel Functions with Web Request/Response API for serverless deployment.",
- },
-
- // Database / ORM libraries → Vercel Storage
- mongoose: {
- skill: "vercel-storage",
- message: "Mongoose detected — loading Vercel Storage guidance for database options on the platform.",
- },
- prisma: {
- skill: "vercel-storage",
- message: "Prisma detected — loading Vercel Storage guidance for Neon Postgres (recommended) and other Marketplace databases.",
- },
- "@libsql/client": {
- skill: "vercel-storage",
- message: "@libsql/client detected — loading Vercel Storage guidance for Marketplace database alternatives.",
- },
- "@vercel/postgres": {
- skill: "vercel-storage",
- message: "@vercel/postgres is sunset — use @neondatabase/serverless instead. Loading Storage migration guidance.",
- },
- "@vercel/kv": {
- skill: "vercel-storage",
- message: "@vercel/kv is sunset — use @upstash/redis instead. Loading Storage migration guidance.",
- },
-
- // Payments → Stripe integration
- stripe: {
- skill: "payments",
- message: "Stripe detected — loading Vercel Marketplace Stripe integration guidance for checkout, webhooks, and subscriptions.",
- },
-
- // Direct AI provider SDKs → AI Gateway
- openai: {
- skill: "ai-gateway",
- message: "Direct OpenAI SDK detected — AI Gateway provides OIDC auth, failover, and cost tracking with no manual API keys. Loading AI Gateway guidance.",
- },
- "@anthropic-ai/sdk": {
- skill: "ai-gateway",
- message: "Direct Anthropic SDK detected — AI Gateway provides unified access to all providers. Loading AI Gateway guidance.",
- },
- "@google/generative-ai": {
- skill: "ai-gateway",
- message: "Direct Google AI SDK detected — AI Gateway provides unified access to all providers. Loading AI Gateway guidance.",
- },
- langchain: {
- skill: "ai-sdk",
- message: "LangChain detected — AI SDK v6 provides native tool calling, agents, and streaming without the LangChain abstraction layer. Loading AI SDK guidance.",
- },
- "@langchain/core": {
- skill: "ai-sdk",
- message: "LangChain Core detected — AI SDK v6 provides native tool calling, agents, and streaming without the LangChain abstraction layer. Loading AI SDK guidance.",
- },
-
- // Auth
- "next-auth": {
- skill: "auth",
- message: "next-auth detected — consider Clerk via Vercel Marketplace for managed auth with auto-provisioned env vars. Loading auth guidance.",
- },
- "@clerk/nextjs": {
- skill: "auth",
- message: "@clerk/nextjs detected — loading Vercel Marketplace Clerk integration guidance for middleware auth and sign-in flows.",
- },
-
- // CMS
- "@sanity/client": {
- skill: "cms",
- message: "@sanity/client detected — loading Vercel Marketplace Sanity integration guidance for studio, preview mode, and revalidation.",
- },
- contentful: {
- skill: "cms",
- message: "Contentful detected — loading CMS integration guidance for content modeling, preview mode, and revalidation webhooks.",
- },
-
- // Chat platforms → Chat SDK
- "@slack/bolt": {
- skill: "chat-sdk",
- message: "@slack/bolt detected — Chat SDK provides a unified multi-platform API (Slack, Teams, Discord, Telegram) with a single codebase. Loading Chat SDK guidance.",
- },
- "@slack/web-api": {
- skill: "chat-sdk",
- message: "@slack/web-api detected — Chat SDK provides a unified multi-platform API with cards, streaming, and state management. Loading Chat SDK guidance.",
- },
- "discord.js": {
- skill: "chat-sdk",
- message: "discord.js detected — Chat SDK provides a unified multi-platform API (Slack, Teams, Discord, Telegram) with a single codebase. Loading Chat SDK guidance.",
- },
- telegraf: {
- skill: "chat-sdk",
- message: "Telegraf detected — Chat SDK provides a unified multi-platform API (Slack, Teams, Discord, Telegram) with a single codebase. Loading Chat SDK guidance.",
- },
- grammy: {
- skill: "chat-sdk",
- message: "Grammy detected — Chat SDK provides a unified multi-platform API (Slack, Teams, Discord, Telegram) with a single codebase. Loading Chat SDK guidance.",
- },
-
- // Email
- resend: {
- skill: "email",
- message: "Resend detected — loading Vercel Marketplace Resend integration guidance for transactional emails and React Email templates.",
- },
-
- // Workflow-related
- workflow: {
- skill: "workflow",
- message: "Workflow DevKit installed — loading WDK guidance for durable workflows.",
- },
-
- // AI SDK
- ai: {
- skill: "ai-sdk",
- message: "AI SDK installed — loading AI SDK v6 guidance.",
- },
- "@ai-sdk/react": {
- skill: "ai-sdk",
- message: "@ai-sdk/react installed — loading AI SDK v6 guidance for React hooks.",
- },
-
- // Security / middleware
- cors: {
- skill: "routing-middleware",
- message: "cors detected — Vercel Routing Middleware handles CORS at the platform level with rewrites and headers. Loading Routing Middleware guidance.",
- },
-
- // Env management
- dotenv: {
- skill: "env-vars",
- message: "dotenv detected — Vercel manages environment variables natively via `vercel env`. Loading env-vars guidance.",
- },
-
-};
-
-// ---------------------------------------------------------------------------
-// Logger
-// ---------------------------------------------------------------------------
-
-const log: Logger = createLogger();
-
-// ---------------------------------------------------------------------------
-// Command parsing
-// ---------------------------------------------------------------------------
-
-/**
- * Regex that matches package manager install commands and captures the
- * package list portion. Handles:
- * npm install
- * npm i
- * npm add
- * yarn add
- * pnpm add
- * pnpm install (when followed by package names)
- * bun add
- * bun install (when followed by package names)
- */
-const INSTALL_CMD_RE =
- /(?:npm\s+(?:install|i|add)|yarn\s+add|pnpm\s+(?:add|install)|bun\s+(?:add|install))\s+(.+)/;
-
-/**
- * Parse a bash command string and extract installed package names.
- * Returns an array of package names (without version specifiers).
- */
-export function parseInstallCommand(command: string): string[] {
- if (!command || typeof command !== "string") return [];
-
- const match = INSTALL_CMD_RE.exec(command);
- if (!match) return [];
-
- const pkgString = match[1];
- const packages: string[] = [];
-
- // Split on whitespace, filter flags (--save-dev, -D, etc.) and version specs
- for (const token of pkgString.split(/\s+/)) {
- if (!token) continue;
- // Skip flags
- if (token.startsWith("-")) continue;
- // Skip if it looks like a path (./foo, ../bar, /abs)
- if (token.startsWith(".") || token.startsWith("/")) continue;
-
- // Strip version specifier (@latest, @^1.0.0, etc.) but preserve scoped packages (@scope/pkg)
- let pkgName = token;
- // For scoped packages like @scope/pkg@1.0.0, strip version after the second @
- if (pkgName.startsWith("@")) {
- const slashIndex = pkgName.indexOf("/");
- if (slashIndex > 0) {
- const afterSlash = pkgName.slice(slashIndex + 1);
- const versionAt = afterSlash.indexOf("@");
- if (versionAt > 0) {
- pkgName = pkgName.slice(0, slashIndex + 1 + versionAt);
- }
- }
- } else {
- // Unscoped: strip @version
- const atIndex = pkgName.indexOf("@");
- if (atIndex > 0) {
- pkgName = pkgName.slice(0, atIndex);
- }
- }
-
- if (pkgName) packages.push(pkgName);
- }
-
- return packages;
-}
-
-// ---------------------------------------------------------------------------
-// Input parsing
-// ---------------------------------------------------------------------------
-
-export interface ParsedBashInput {
- command: string;
- sessionId: string | null;
- platform: HookPlatform;
-}
-
-function resolveSessionId(input: Record): string | null {
- const sessionId = input.session_id ?? input.conversation_id;
- return typeof sessionId === "string" && sessionId.trim() !== "" ? sessionId : null;
-}
-
-export function parseBashInput(
- raw: string,
- logger?: Logger,
-): ParsedBashInput | null {
- const l = logger || log;
- const trimmed = (raw || "").trim();
- if (!trimmed) return null;
-
- let input: Record;
- try {
- input = JSON.parse(trimmed);
- } catch {
- return null;
- }
-
- const toolName = (input.tool_name as string) || "";
- if (toolName !== "Bash") {
- l.debug("posttooluse-bash-chain-skip", { reason: "not_bash_tool", toolName });
- return null;
- }
-
- const toolInput = (input.tool_input as Record) || {};
- const command = (toolInput.command as string) || "";
- if (!command) {
- l.debug("posttooluse-bash-chain-skip", { reason: "no_command" });
- return null;
- }
-
- const sessionId = resolveSessionId(input);
- const platform = detectPlatform(input);
-
- return { command, sessionId, platform };
-}
-
-// ---------------------------------------------------------------------------
-// Skill injection
-// ---------------------------------------------------------------------------
-
-export interface BashChainInjection {
- packageName: string;
- skill: string;
- message: string;
- content: string;
-}
-
-export interface BashChainResult {
- injected: BashChainInjection[];
- totalBytes: number;
-}
-
-/**
- * For each installed package that maps to a skill, read the SKILL.md body
- * and prepare it for injection (respecting dedup and budget).
- */
-export function runBashChainInjection(
- packages: string[],
- sessionId: string | null,
- pluginRoot: string,
- logger?: Logger,
- env: NodeJS.ProcessEnv = process.env,
-): BashChainResult {
- const l = logger || log;
- const result: BashChainResult = { injected: [], totalBytes: 0 };
-
- if (packages.length === 0) return result;
-
- const chainCap = Math.max(
- 1,
- parseInt(env.VERCEL_PLUGIN_CHAIN_CAP || "", 10) || DEFAULT_CHAIN_CAP,
- );
-
- // Read the persisted session-backed seen-skills state for dedup
- const fileSeen = sessionId ? readSessionFile(sessionId, "seen-skills") : "";
- const seenSet = new Set(fileSeen.split(",").filter(Boolean));
-
- // Deduplicate target skills across packages (first package wins per skill)
- const targetsSeen = new Set();
-
- for (const pkg of packages) {
- const mapping = PACKAGE_SKILL_MAP[pkg];
- if (!mapping) continue;
-
- const { skill, message } = mapping;
-
- // Skip duplicate targets within this invocation
- if (targetsSeen.has(skill)) continue;
- targetsSeen.add(skill);
-
- // Enforce chain cap
- if (result.injected.length >= chainCap) {
- l.debug("posttooluse-bash-chain-cap-reached", {
- cap: chainCap,
- remaining: packages.length - result.injected.length,
- });
- break;
- }
-
- // Skip if already injected this session
- if (seenSet.has(skill)) {
- l.debug("posttooluse-bash-chain-skip-dedup", { pkg, skill });
- continue;
- }
-
- // Read target SKILL.md
- const skillPath = join(pluginRoot, "skills", skill, "SKILL.md");
- const skillContent = safeReadFile(skillPath);
- if (!skillContent) {
- l.debug("posttooluse-bash-chain-skip-missing", { pkg, skill, path: skillPath });
- continue;
- }
-
- const { body } = extractFrontmatter(skillContent);
- const trimmedBody = body.trim();
- if (!trimmedBody) continue;
-
- // Check budget
- const bytes = Buffer.byteLength(trimmedBody, "utf-8");
- if (result.totalBytes + bytes > CHAIN_BUDGET_BYTES) {
- l.debug("posttooluse-bash-chain-budget-exceeded", {
- pkg,
- skill,
- bytes,
- totalBytes: result.totalBytes,
- budget: CHAIN_BUDGET_BYTES,
- });
- break;
- }
-
- // Claim via dedup
- if (sessionId) {
- const claimed = tryClaimSessionKey(sessionId, "seen-skills", skill);
- if (!claimed) {
- l.debug("posttooluse-bash-chain-skip-concurrent-claim", { pkg, skill });
- seenSet.add(skill);
- continue;
- }
- syncSessionFileFromClaims(sessionId, "seen-skills");
- }
-
- seenSet.add(skill);
-
- result.injected.push({ packageName: pkg, skill, message, content: trimmedBody });
- result.totalBytes += bytes;
-
- l.debug("posttooluse-bash-chain-injected", { pkg, skill, bytes, totalBytes: result.totalBytes });
- }
-
- if (result.injected.length > 0) {
- l.summary("posttooluse-bash-chain-result", {
- injectedCount: result.injected.length,
- totalBytes: result.totalBytes,
- targets: result.injected.map((i) => i.skill),
- });
- }
-
- return result;
-}
-
-// ---------------------------------------------------------------------------
-// Output formatting
-// ---------------------------------------------------------------------------
-
-function formatPlatformOutput(
- platform: HookPlatform,
- additionalContext: string,
-): string {
- if (platform === "cursor") {
- return JSON.stringify({ additional_context: additionalContext });
- }
-
- const output: SyncHookJSONOutput = {
- hookSpecificOutput: {
- hookEventName: "PostToolUse" as const,
- additionalContext,
- },
- };
- return JSON.stringify(output);
-}
-
-export function formatBashChainOutput(
- chainResult: BashChainResult,
- platform: HookPlatform = "claude-code",
-): string {
- if (chainResult.injected.length === 0) return "{}";
-
- const parts: string[] = [];
- for (const chain of chainResult.injected) {
- parts.push(
- ``,
- `**Skill context auto-loaded** (${chain.skill}): ${chain.message}`,
- "",
- chain.content,
- ``,
- );
- }
-
- const metadata = {
- version: 1,
- hook: "posttooluse-bash-chain",
- packages: chainResult.injected.map((i) => i.packageName),
- chainedSkills: chainResult.injected.map((i) => i.skill),
- };
- parts.push(``);
-
- return formatPlatformOutput(platform, parts.join("\n"));
-}
-
-// ---------------------------------------------------------------------------
-// Orchestrator
-// ---------------------------------------------------------------------------
-
-export function run(): string {
- const tStart = log.active ? log.now() : 0;
-
- let raw: string;
- try {
- raw = readFileSync(0, "utf-8");
- } catch {
- return "{}";
- }
-
- const parsed = parseBashInput(raw, log);
- if (!parsed) return "{}";
-
- const { command, sessionId, platform } = parsed;
-
- const packages = parseInstallCommand(command);
- if (packages.length === 0) {
- log.debug("posttooluse-bash-chain-skip", { reason: "no_packages_detected", command });
- return "{}";
- }
-
- log.debug("posttooluse-bash-chain-packages", { packages, command });
-
- const chainResult = runBashChainInjection(packages, sessionId, PLUGIN_ROOT, log);
- const output = formatBashChainOutput(chainResult, platform);
-
- log.complete("posttooluse-bash-chain-done", {
- matchedCount: packages.length,
- injectedCount: chainResult.injected.length,
- dedupedCount: 0,
- cappedCount: 0,
- }, log.active ? { total: Math.round(log.now() - tStart) } : {});
-
- return output;
-}
-
-// ---------------------------------------------------------------------------
-// Execute (only when run directly)
-// ---------------------------------------------------------------------------
-
-function isMainModule(): boolean {
- try {
- const scriptPath = realpathSync(resolve(process.argv[1] || ""));
- const modulePath = realpathSync(fileURLToPath(import.meta.url));
- return scriptPath === modulePath;
- } catch {
- return false;
- }
-}
-
-if (isMainModule()) {
- try {
- const output = run();
- process.stdout.write(output);
- } catch (err) {
- const entry = [
- `[${new Date().toISOString()}] CRASH in posttooluse-bash-chain.mts`,
- ` error: ${(err as Error)?.message || String(err)}`,
- ` stack: ${(err as Error)?.stack || "(no stack)"}`,
- ` PLUGIN_ROOT: ${PLUGIN_ROOT}`,
- "",
- ].join("\n");
- process.stderr.write(entry);
- process.stdout.write("{}");
- }
-}
diff --git a/hooks/src/posttooluse-telemetry.mts b/hooks/src/posttooluse-telemetry.mts
deleted file mode 100644
index b202acc..0000000
--- a/hooks/src/posttooluse-telemetry.mts
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env node
-
-import { readFileSync } from "node:fs";
-
-function parseStdin(): Record | null {
- try {
- const raw = readFileSync(0, "utf-8").trim();
- if (!raw) return null;
- return JSON.parse(raw);
- } catch {
- return null;
- }
-}
-
-async function main(): Promise {
- parseStdin();
-
- process.stdout.write("{}");
- process.exit(0);
-}
-
-main();
diff --git a/hooks/src/posttooluse-validate.mts b/hooks/src/posttooluse-validate.mts
deleted file mode 100644
index b6f26c2..0000000
--- a/hooks/src/posttooluse-validate.mts
+++ /dev/null
@@ -1,945 +0,0 @@
-#!/usr/bin/env node
-/**
- * PostToolUse hook: validates files after Write/Edit operations against
- * skill-specific validation rules defined in SKILL.md frontmatter.
- *
- * Input: JSON on stdin with tool_name, tool_input, session_id, cwd
- * Output: JSON on stdout with { hookSpecificOutput: { additionalContext: "..." } } or {}
- *
- * Only fires for Write and Edit tool calls. Reads the written file,
- * matches it against skill import/path patterns, then runs validate:
- * regex rules from matched skills. Error-severity violations produce
- * additionalContext with fix instructions. Warn-severity only at debug level.
- *
- * Dedup: tracks validated file+hash pairs in VERCEL_PLUGIN_VALIDATED_FILES
- * for in-process checks and persists the merged state in the session
- * "validated-files" file to skip re-validation across hook invocations.
- *
- * Pipeline stages:
- * parseInput → loadValidateRules → matchFileToSkills → runValidation
- * → runChainInjection → formatOutput
- */
-
-import type { SyncHookJSONOutput } from "@anthropic-ai/claude-agent-sdk";
-import { createHash } from "node:crypto";
-import { appendFileSync, readFileSync, realpathSync } from "node:fs";
-import { join, resolve } from "node:path";
-import { fileURLToPath } from "node:url";
-import { detectPlatform, type HookPlatform } from "./compat.mjs";
-import {
- dedupFilePath,
- pluginRoot as resolvePluginRoot,
- readSessionFile,
- safeReadFile,
- writeSessionFile,
- tryClaimSessionKey,
- syncSessionFileFromClaims,
-} from "./hook-env.mjs";
-import { buildSkillMap, extractFrontmatter } from "./skill-map-frontmatter.mjs";
-import type { ChainToRule, SkillConfig, ValidationRule } from "./skill-map-frontmatter.mjs";
-import {
- compileSkillPatterns,
- matchPathWithReason,
- matchImportWithReason,
- importPatternToRegex,
-} from "./patterns.mjs";
-import type { CompiledSkillEntry, CompiledPattern } from "./patterns.mjs";
-import { createLogger, logCaughtError } from "./logger.mjs";
-import type { Logger } from "./logger.mjs";
-
-const PLUGIN_ROOT = resolvePluginRoot();
-const SUPPORTED_TOOLS = ["Write", "Edit"];
-const VALIDATED_FILES_ENV_KEY = "VERCEL_PLUGIN_VALIDATED_FILES";
-const SEEN_VALIDATIONS_KIND = "seen-validations";
-const CHAIN_BUDGET_BYTES = 18_000;
-const DEFAULT_CHAIN_CAP = 2;
-const REPEATED_SUGGESTION_THRESHOLD = 3;
-
-// ---------------------------------------------------------------------------
-// Types
-// ---------------------------------------------------------------------------
-
-export interface ParsedInput {
- toolName: string;
- filePath: string;
- filePaths: string[];
- sessionId: string | null;
- cwd: string;
- platform: HookPlatform;
-}
-
-function resolveToolFilePaths(toolInput: Record): string[] {
- const collected: string[] = [];
-
- const pushPath = (value: unknown): void => {
- if (typeof value !== "string") return;
- const trimmed = value.trim();
- if (trimmed !== "") {
- collected.push(trimmed);
- }
- };
-
- pushPath(toolInput.file_path);
-
- if (Array.isArray(toolInput.file_paths)) {
- for (const value of toolInput.file_paths) {
- pushPath(value);
- }
- }
-
- if (Array.isArray(toolInput.files)) {
- for (const value of toolInput.files) {
- if (typeof value === "string") {
- pushPath(value);
- continue;
- }
-
- if (value && typeof value === "object" && "file_path" in value) {
- pushPath((value as { file_path?: unknown }).file_path);
- }
- }
- }
-
- return [...new Set(collected)];
-}
-
-function resolveSessionId(input: Record): string | null {
- const sessionId = input.session_id ?? input.conversation_id;
- return typeof sessionId === "string" && sessionId.trim() !== "" ? sessionId : null;
-}
-
-function resolveHookCwd(input: Record, env: NodeJS.ProcessEnv): string {
- const workspaceRoot = Array.isArray(input.workspace_roots) ? input.workspace_roots[0] : undefined;
- const candidate = input.cwd
- ?? workspaceRoot
- ?? env.CURSOR_PROJECT_DIR
- ?? env.CLAUDE_PROJECT_ROOT
- ?? process.cwd();
-
- return typeof candidate === "string" && candidate.trim() !== "" ? candidate : process.cwd();
-}
-
-function formatPlatformOutput(
- platform: HookPlatform,
- additionalContext?: string,
- env?: Record,
-): string {
- if (!additionalContext) {
- return "{}";
- }
-
- if (platform === "cursor") {
- const output: Record = {
- additional_context: additionalContext,
- };
- if (env && Object.keys(env).length > 0) {
- output.env = env;
- }
- return JSON.stringify(output);
- }
-
- const output: SyncHookJSONOutput = {
- hookSpecificOutput: {
- hookEventName: "PostToolUse" as const,
- additionalContext,
- },
- };
-
- return JSON.stringify(output);
-}
-
-export interface SkillValidateRules {
- skill: string;
- rules: ValidationRule[];
-}
-
-export interface ValidationViolation {
- skill: string;
- line: number;
- message: string;
- severity: "error" | "recommended" | "warn";
- matchedText: string;
- filePath?: string;
- ruleId?: string;
- occurrenceCount?: number;
- repeated?: boolean;
- upgradeToSkill?: string;
- upgradeWhy?: string;
- upgradeMode?: "hard" | "soft";
-}
-
-/**
- * Generate a stable ID for a validation rule (skill + pattern hash).
- */
-function validationRuleId(skill: string, rule: ValidationRule): string {
- return `${skill}::${rule.pattern}`;
-}
-
-export interface ValidateResult {
- violations: ValidationViolation[];
- matchedSkills: string[];
- skippedDedup: boolean;
-}
-
-// ---------------------------------------------------------------------------
-// Logger
-// ---------------------------------------------------------------------------
-
-const log: Logger = createLogger();
-
-// ---------------------------------------------------------------------------
-// Pipeline stage 1: parseInput
-// ---------------------------------------------------------------------------
-
-/**
- * Parse raw stdin JSON into a normalized input descriptor.
- * Returns null if input is irrelevant (wrong tool, no file path, etc.).
- */
-export function parseInput(
- raw: string,
- logger?: Logger,
- env: NodeJS.ProcessEnv = process.env,
-): ParsedInput | null {
- const l = logger || log;
- const trimmed = (raw || "").trim();
- if (!trimmed) {
- l.debug("posttooluse-validate-skip", { reason: "stdin_empty" });
- return null;
- }
-
- let input: Record;
- try {
- input = JSON.parse(trimmed);
- } catch {
- l.debug("posttooluse-validate-skip", { reason: "stdin_parse_fail" });
- return null;
- }
-
- const toolName = (input.tool_name as string) || "";
- if (!SUPPORTED_TOOLS.includes(toolName)) {
- l.debug("posttooluse-validate-skip", { reason: "unsupported_tool", toolName });
- return null;
- }
-
- const toolInput = (input.tool_input as Record) || {};
- const filePaths = resolveToolFilePaths(toolInput);
- const filePath = filePaths[0] || "";
- if (!filePath) {
- l.debug("posttooluse-validate-skip", { reason: "no_file_path", toolName });
- return null;
- }
-
- const sessionId = resolveSessionId(input);
- const cwd = resolveHookCwd(input, env);
- const platform = detectPlatform(input);
-
- l.debug("posttooluse-validate-input", {
- toolName,
- filePath,
- filePathsCount: filePaths.length,
- sessionId: sessionId as string,
- cwd,
- platform,
- });
- return { toolName, filePath, filePaths, sessionId, cwd, platform };
-}
-
-// ---------------------------------------------------------------------------
-// Pipeline stage 2: loadValidateRules
-// ---------------------------------------------------------------------------
-
-export interface LoadedValidateData {
- skillMap: Record;
- compiledSkills: CompiledSkillEntry[];
- rulesMap: Map;
- chainMap: Map;
-}
-
-/**
- * Load skills that have validate: rules. Returns null if no rules exist.
- */
-export function loadValidateRules(pluginRoot: string, logger?: Logger): LoadedValidateData | null {
- const l = logger || log;
- const skillsDir = join(pluginRoot, "skills");
- const { skills: skillMap } = buildSkillMap(skillsDir);
-
- // Filter to skills that have validate rules or chainTo rules
- const rulesMap = new Map();
- const chainMap = new Map();
- for (const [slug, config] of Object.entries(skillMap)) {
- if (config.validate && config.validate.length > 0) {
- rulesMap.set(slug, config.validate);
- }
- if (config.chainTo && config.chainTo.length > 0) {
- chainMap.set(slug, config.chainTo);
- }
- }
-
- if (rulesMap.size === 0 && chainMap.size === 0) {
- l.debug("posttooluse-validate-skip", { reason: "no_validate_rules" });
- return null;
- }
-
- const compiledSkills = compileSkillPatterns(skillMap);
- l.debug("posttooluse-validate-loaded", {
- totalSkills: Object.keys(skillMap).length,
- skillsWithRules: rulesMap.size,
- skillsWithChainTo: chainMap.size,
- });
-
- return { skillMap, compiledSkills, rulesMap, chainMap };
-}
-
-// ---------------------------------------------------------------------------
-// Pipeline stage 3: matchFileToSkills
-// ---------------------------------------------------------------------------
-
-/**
- * Match a file path and its content against skill patterns to find
- * which skills' validate rules should apply.
- */
-export function matchFileToSkills(
- filePath: string,
- fileContent: string,
- compiledSkills: CompiledSkillEntry[],
- rulesMap: Map,
- logger?: Logger,
- chainMap?: Map,
-): string[] {
- const l = logger || log;
- const matched: string[] = [];
-
- for (const entry of compiledSkills) {
- // Only check skills that have validate rules or chainTo rules
- if (!rulesMap.has(entry.skill) && !(chainMap?.has(entry.skill))) continue;
-
- // Match by path
- const pathMatch = matchPathWithReason(filePath, entry.compiledPaths);
- if (pathMatch) {
- matched.push(entry.skill);
- l.trace("posttooluse-validate-match", {
- skill: entry.skill,
- matchType: "path",
- pattern: pathMatch.pattern,
- });
- continue;
- }
-
- // Match by import patterns in file content
- const importMatch = matchImportWithReason(fileContent, entry.compiledImports);
- if (importMatch) {
- matched.push(entry.skill);
- l.trace("posttooluse-validate-match", {
- skill: entry.skill,
- matchType: "import",
- pattern: importMatch.pattern,
- });
- }
- }
-
- l.debug("posttooluse-validate-matched", { matchedSkills: matched });
- return matched;
-}
-
-// ---------------------------------------------------------------------------
-// Pipeline stage 4: runValidation
-// ---------------------------------------------------------------------------
-
-/**
- * Run validation rules against file content. Returns violations found.
- */
-export function runValidation(
- fileContent: string,
- matchedSkills: string[],
- rulesMap: Map,
- logger?: Logger,
- filePath?: string,
-): ValidationViolation[] {
- const l = logger || log;
- const violations: ValidationViolation[] = [];
- const lines = fileContent.split("\n");
-
- for (const skill of matchedSkills) {
- const rules = rulesMap.get(skill);
- if (!rules) continue;
-
- for (const rule of rules) {
- const ruleId = validationRuleId(skill, rule);
-
- // Skip rule if file matches the skip condition
- if (rule.skipIfFileContains) {
- try {
- if (new RegExp(rule.skipIfFileContains, "m").test(fileContent)) {
- l.trace("posttooluse-validate-rule-skip", {
- skill,
- pattern: rule.pattern,
- reason: "skipIfFileContains matched",
- });
- continue;
- }
- } catch {
- // Invalid skip regex — proceed with rule anyway
- }
- }
-
- let regex: RegExp;
- try {
- regex = new RegExp(rule.pattern, "g");
- } catch {
- l.debug("posttooluse-validate-regex-fail", {
- skill,
- pattern: rule.pattern,
- });
- continue;
- }
-
- // Check each line for matches
- for (let i = 0; i < lines.length; i++) {
- regex.lastIndex = 0;
- const match = regex.exec(lines[i]);
- if (match) {
- violations.push({
- skill,
- line: i + 1,
- message: rule.message,
- severity: rule.severity,
- matchedText: match[0].slice(0, 80),
- filePath,
- ruleId,
- upgradeToSkill: rule.upgradeToSkill,
- upgradeWhy: rule.upgradeWhy,
- upgradeMode: rule.upgradeMode ?? (rule.upgradeToSkill ? "soft" : undefined),
- });
- }
- }
- }
- }
-
- l.debug("posttooluse-validate-violations", {
- total: violations.length,
- errors: violations.filter((v) => v.severity === "error").length,
- recommended: violations.filter((v) => v.severity === "recommended").length,
- warns: violations.filter((v) => v.severity === "warn").length,
- });
-
- return violations;
-}
-
-// ---------------------------------------------------------------------------
-// Pipeline stage 4b: chainTo injection
-// ---------------------------------------------------------------------------
-
-export interface ChainInjection {
- sourceSkill: string;
- targetSkill: string;
- message?: string;
- content: string;
-}
-
-export interface ChainResult {
- injected: ChainInjection[];
- totalBytes: number;
-}
-
-/**
- * Check chainTo rules from matched skills against file content. For each
- * match, if the target skill hasn't been injected in this session, read
- * its SKILL.md body and collect it for injection.
- *
- * Chain depth is limited to 1 hop (no recursive chaining).
- * A per-invocation byte budget of CHAIN_BUDGET_BYTES applies.
- */
-export function runChainInjection(
- fileContent: string,
- matchedSkills: string[],
- chainMap: Map,
- sessionId: string | null,
- pluginRoot: string,
- logger?: Logger,
- env: NodeJS.ProcessEnv = process.env,
-): ChainResult {
- const l = logger || log;
- const result: ChainResult = { injected: [], totalBytes: 0 };
-
- // Chain cap: max skills injected per PostToolUse invocation
- const chainCap = Math.max(1, parseInt(env.VERCEL_PLUGIN_CHAIN_CAP || "", 10) || DEFAULT_CHAIN_CAP);
-
- // Collect all matching chainTo rules across matched skills
- const candidates: Array<{ sourceSkill: string; rule: ChainToRule }> = [];
- for (const skill of matchedSkills) {
- const rules = chainMap.get(skill);
- if (!rules) continue;
- for (const rule of rules) {
- // skipIfFileContains: skip this chain rule if file already has the target pattern
- if (rule.skipIfFileContains) {
- try {
- if (new RegExp(rule.skipIfFileContains, "m").test(fileContent)) {
- l.debug("posttooluse-chain-skip-contains", {
- skill,
- targetSkill: rule.targetSkill,
- reason: "skipIfFileContains matched",
- });
- continue;
- }
- } catch {
- // Invalid skip regex — proceed with rule anyway
- }
- }
-
- try {
- const regex = new RegExp(rule.pattern, "m");
- if (regex.test(fileContent)) {
- candidates.push({ sourceSkill: skill, rule });
- }
- } catch {
- l.debug("posttooluse-chain-regex-fail", {
- skill,
- pattern: rule.pattern,
- });
- }
- }
- }
-
- if (candidates.length === 0) return result;
-
- // Deduplicate target skills (first match wins per target)
- const seenTargets = new Set();
- const uniqueCandidates = candidates.filter(({ rule }) => {
- if (seenTargets.has(rule.targetSkill)) return false;
- seenTargets.add(rule.targetSkill);
- return true;
- });
-
- // Check dedup against the persisted session-backed seen-skills state
- const fileSeen = sessionId ? readSessionFile(sessionId, "seen-skills") : "";
- const seenSet = new Set(fileSeen.split(",").filter(Boolean));
-
- for (const { sourceSkill, rule } of uniqueCandidates) {
- // Enforce chain cap
- if (result.injected.length >= chainCap) {
- l.debug("posttooluse-chain-cap-reached", {
- cap: chainCap,
- remaining: uniqueCandidates.length - result.injected.length,
- });
- break;
- }
-
- // Skip if target already injected this session (loop prevention)
- if (seenSet.has(rule.targetSkill)) {
- l.debug("posttooluse-chain-skip-dedup", {
- sourceSkill,
- targetSkill: rule.targetSkill,
- });
- continue;
- }
-
- // Read target SKILL.md
- const skillPath = join(pluginRoot, "skills", rule.targetSkill, "SKILL.md");
- const skillContent = safeReadFile(skillPath);
- if (!skillContent) {
- l.debug("posttooluse-chain-skip-missing", {
- sourceSkill,
- targetSkill: rule.targetSkill,
- path: skillPath,
- });
- continue;
- }
-
- // Extract body (skip frontmatter)
- const { body } = extractFrontmatter(skillContent);
- const trimmedBody = body.trim();
- if (!trimmedBody) continue;
-
- // Check budget
- const bytes = Buffer.byteLength(trimmedBody, "utf-8");
- if (result.totalBytes + bytes > CHAIN_BUDGET_BYTES) {
- l.debug("posttooluse-chain-budget-exceeded", {
- sourceSkill,
- targetSkill: rule.targetSkill,
- bytes,
- totalBytes: result.totalBytes,
- budget: CHAIN_BUDGET_BYTES,
- });
- break;
- }
-
- // Claim via dedup
- if (sessionId) {
- const claimed = tryClaimSessionKey(sessionId, "seen-skills", rule.targetSkill);
- if (!claimed) {
- l.debug("posttooluse-chain-skip-concurrent-claim", {
- sourceSkill,
- targetSkill: rule.targetSkill,
- });
- seenSet.add(rule.targetSkill);
- continue;
- }
- syncSessionFileFromClaims(sessionId, "seen-skills");
- }
-
- seenSet.add(rule.targetSkill);
-
- result.injected.push({
- sourceSkill,
- targetSkill: rule.targetSkill,
- message: rule.message,
- content: trimmedBody,
- });
- result.totalBytes += bytes;
-
- l.debug("posttooluse-chain-injected", {
- sourceSkill,
- targetSkill: rule.targetSkill,
- bytes,
- totalBytes: result.totalBytes,
- });
- }
-
- if (result.injected.length > 0) {
- l.summary("posttooluse-chain-result", {
- injectedCount: result.injected.length,
- totalBytes: result.totalBytes,
- targets: result.injected.map((i) => i.targetSkill),
- });
- }
-
- return result;
-}
-
-// ---------------------------------------------------------------------------
-// Dedup: file+hash tracking via env var and session file
-// ---------------------------------------------------------------------------
-
-/**
- * Compute a fast content hash for dedup tracking.
- */
-export function contentHash(content: string): string {
- return createHash("md5").update(content).digest("hex").slice(0, 12);
-}
-
-/**
- * Parse a serialized validated-files state into a Set of "path:hash" pairs.
- */
-export function parseValidatedFiles(envValue: string | undefined): Set {
- if (typeof envValue !== "string" || envValue.trim() === "") {
- return new Set();
- }
- const set = new Set();
- for (const part of envValue.split(",")) {
- const trimmed = part.trim();
- if (trimmed !== "") set.add(trimmed);
- }
- return set;
-}
-
-/**
- * Append a validated file entry to the serialized state value.
- */
-export function appendValidatedFile(envValue: string | undefined, entry: string): string {
- const current = typeof envValue === "string" ? envValue.trim() : "";
- return current === "" ? entry : `${current},${entry}`;
-}
-
-/**
- * Check if a file+hash has already been validated this session.
- */
-export function isAlreadyValidated(filePath: string, hash: string, sessionId?: string | null): boolean {
- const entry = `${filePath}:${hash}`;
- const validated = parseValidatedFiles(process.env.VERCEL_PLUGIN_VALIDATED_FILES);
- if (validated.has(entry)) {
- return true;
- }
-
- if (!sessionId) {
- return false;
- }
-
- const persisted = parseValidatedFiles(readSessionFile(sessionId, "validated-files"));
- return persisted.has(entry);
-}
-
-/**
- * Mark a file+hash as validated for the current process and session file.
- */
-export function markValidated(
- filePath: string,
- hash: string,
- sessionId?: string | null,
-): string {
- const entry = `${filePath}:${hash}`;
- const persistedState = sessionId ? readSessionFile(sessionId, "validated-files") : "";
- const current = process.env[VALIDATED_FILES_ENV_KEY] || persistedState;
- const next = appendValidatedFile(current, entry);
- process.env[VALIDATED_FILES_ENV_KEY] = next;
- if (sessionId) {
- writeSessionFile(sessionId, "validated-files", next);
- }
- return next;
-}
-
-// ---------------------------------------------------------------------------
-// Pipeline stage 5: formatOutput
-// ---------------------------------------------------------------------------
-
-/**
- * Format validation violations into the hook output JSON.
- * Error-severity violations produce mandatory fix instructions.
- * Recommended-severity violations produce imperative best-practice instructions.
- * Warn-severity violations produce soft-fix suggestions at all log levels.
- */
-export function formatOutput(
- violations: ValidationViolation[],
- matchedSkills: string[],
- filePath: string,
- logger?: Logger,
- platform: HookPlatform = "claude-code",
- env?: Record,
- chainResult?: ChainResult,
-): string {
- const l = logger || log;
- const hasChains = chainResult && chainResult.injected.length > 0;
-
- if (violations.length === 0 && !hasChains) {
- l.debug("posttooluse-validate-no-output", { reason: "no_actionable_violations" });
- return "{}";
- }
-
- const errors = violations.filter((v) => v.severity === "error");
- const recommended = violations.filter((v) => v.severity === "recommended");
- const warns = violations.filter((v) => v.severity === "warn");
- const hasErrors = errors.length > 0;
- const hasRecommended = recommended.length > 0;
- const hasWarns = warns.length > 0;
-
- // Group by skill for clear output
- const bySkill = new Map();
- for (const v of violations) {
- if (!bySkill.has(v.skill)) bySkill.set(v.skill, []);
- bySkill.get(v.skill)!.push(v);
- }
-
- const emittedUpgradeSkills = new Set();
-
- const formatViolationLine = (
- violation: ValidationViolation,
- label: "ERROR" | "RECOMMENDED" | "SUGGESTION",
- ): string => {
- const lines = [`- Line ${violation.line} [${label}]: ${violation.message}`];
- if (violation.upgradeToSkill && !emittedUpgradeSkills.has(violation.upgradeToSkill)) {
- emittedUpgradeSkills.add(violation.upgradeToSkill);
- const reason = violation.upgradeWhy ? ` Reason: ${violation.upgradeWhy}` : "";
- const prefix = violation.upgradeMode === "hard" ? "REQUIRED: " : "";
- lines.push("");
- lines.push(`${prefix}Use the Skill tool now to load ${violation.upgradeToSkill}.${reason}`);
- lines.push(
- ``,
- );
- }
- return lines.join("\n");
- };
-
- const parts: string[] = [];
- for (const [skill, skillViolations] of bySkill) {
- const errorLines = skillViolations
- .filter((v) => v.severity === "error")
- .map((v) => formatViolationLine(v, "ERROR"));
- const recommendedLines = skillViolations
- .filter((v) => v.severity === "recommended")
- .map((v) => formatViolationLine(v, "RECOMMENDED"));
- const warnLines = skillViolations
- .filter((v) => v.severity === "warn")
- .map((v) => formatViolationLine(v, "SUGGESTION"));
- parts.push([...errorLines, ...recommendedLines, ...warnLines].join("\n"));
- }
-
- const skillList = [...bySkill.keys()].join(", ");
-
- const counts = [
- hasErrors ? `${errors.length} error${errors.length > 1 ? "s" : ""}` : "",
- hasRecommended ? `${recommended.length} recommendation${recommended.length > 1 ? "s" : ""}` : "",
- hasWarns ? `${warns.length} suggestion${warns.length > 1 ? "s" : ""}` : "",
- ].filter(Boolean).join(", ");
-
- // Errors demand fixes; recommended gets imperative language; warn-only gets a softer call to action
- const callToAction = hasErrors
- ? `Please fix these issues before proceeding.`
- : hasRecommended
- ? `Apply these recommendations before continuing — they reflect current best practices.`
- : `Consider applying these suggestions to follow best practices.`;
-
- // Build validation context block (may be empty if only chains matched)
- const contextParts: string[] = [];
-
- if (violations.length > 0) {
- contextParts.push(
- ``,
- `VALIDATION (${counts}) for \`${filePath}\`:`,
- ...parts,
- callToAction,
- ``,
- );
- }
-
- // Append chain-injected skill content
- if (hasChains) {
- for (const chain of chainResult!.injected) {
- const reason = chain.message ? ` ${chain.message}` : "";
- contextParts.push(
- ``,
- `**Skill context auto-loaded** (${chain.targetSkill}):${reason}`,
- "",
- chain.content,
- ``,
- );
- }
- }
-
- const context = contextParts.join("\n");
-
- const chainedSkills = hasChains
- ? chainResult!.injected.map((c) => c.targetSkill)
- : [];
-
- const metadata = {
- version: 1,
- hook: "posttooluse-validate",
- filePath,
- matchedSkills,
- errorCount: errors.length,
- recommendedCount: recommended.length,
- warnCount: warns.length,
- chainedSkills,
- };
- const metaComment = ``;
-
- l.summary("posttooluse-validate-output", {
- filePath,
- matchedSkills,
- errorCount: errors.length,
- recommendedCount: recommended.length,
- warnCount: warns.length,
- chainedSkills,
- });
-
- return formatPlatformOutput(platform, context + "\n" + metaComment, env);
-}
-
-// ---------------------------------------------------------------------------
-// Orchestrator: run()
-// ---------------------------------------------------------------------------
-
-export function run(): string {
- const timing: Record = {};
- const tStart = log.active ? log.now() : 0;
-
- // Stage 1: parseInput
- let raw: string;
- try {
- raw = readFileSync(0, "utf-8");
- } catch {
- return "{}";
- }
- const parsed = parseInput(raw, log);
- if (!parsed) return "{}";
- if (log.active) timing.parse = Math.round(log.now() - tStart);
-
- const { toolName, filePath, sessionId, cwd, platform } = parsed;
-
- // Read file content from disk
- const resolvedPath = cwd ? resolve(cwd, filePath) : filePath;
- const fileContent = safeReadFile(resolvedPath);
- if (!fileContent) {
- log.debug("posttooluse-validate-skip", { reason: "file_unreadable", filePath: resolvedPath });
- return "{}";
- }
-
- // Dedup check: skip if same file+hash already validated
- const hash = contentHash(fileContent);
- if (isAlreadyValidated(filePath, hash, sessionId)) {
- log.debug("posttooluse-validate-skip", { reason: "already_validated", filePath, hash });
- return "{}";
- }
-
- // Stage 2: loadValidateRules
- const tLoad = log.active ? log.now() : 0;
- const data = loadValidateRules(PLUGIN_ROOT, log);
- if (!data) return "{}";
- if (log.active) timing.load = Math.round(log.now() - tLoad);
-
- const { compiledSkills, rulesMap, chainMap } = data;
-
- // Stage 3: matchFileToSkills
- const tMatch = log.active ? log.now() : 0;
- const matchedSkills = matchFileToSkills(filePath, fileContent, compiledSkills, rulesMap, log, chainMap);
- if (log.active) timing.match = Math.round(log.now() - tMatch);
-
- if (matchedSkills.length === 0) {
- log.debug("posttooluse-validate-skip", { reason: "no_skill_match", filePath });
- markValidated(filePath, hash, sessionId);
- return "{}";
- }
-
- // Stage 4: runValidation
- const tValidate = log.active ? log.now() : 0;
- const violations = runValidation(fileContent, matchedSkills, rulesMap, log);
- if (log.active) timing.validate = Math.round(log.now() - tValidate);
-
- // Stage 4b: chainTo injection
- const tChain = log.active ? log.now() : 0;
- const chainResult = runChainInjection(
- fileContent, matchedSkills, chainMap, sessionId, PLUGIN_ROOT, log,
- );
- if (log.active) timing.chain = Math.round(log.now() - tChain);
-
- // Mark as validated regardless of result (content hasn't changed)
- const validatedFiles = markValidated(filePath, hash, sessionId);
-
- // Stage 5: formatOutput
- const hasOutput = violations.length > 0 || chainResult.injected.length > 0;
- const cursorEnv = platform === "cursor" && hasOutput
- ? { [VALIDATED_FILES_ENV_KEY]: validatedFiles }
- : undefined;
- const result = formatOutput(violations, matchedSkills, filePath, log, platform, cursorEnv, chainResult);
-
- log.complete("posttooluse-validate-done", {
- matchedCount: matchedSkills.length,
- injectedCount: violations.filter((v) => v.severity === "error").length,
- }, timing);
-
- return result;
-}
-
-// ---------------------------------------------------------------------------
-// Execute (only when run directly)
-// ---------------------------------------------------------------------------
-
-function isMainModule(): boolean {
- try {
- const scriptPath = realpathSync(resolve(process.argv[1] || ""));
- const modulePath = realpathSync(fileURLToPath(import.meta.url));
- return scriptPath === modulePath;
- } catch {
- return false;
- }
-}
-
-if (isMainModule()) {
- try {
- const output = run();
- process.stdout.write(output);
- } catch (err) {
- const entry = [
- `[${new Date().toISOString()}] CRASH in posttooluse-validate.mts`,
- ` error: ${(err as Error)?.message || String(err)}`,
- ` stack: ${(err as Error)?.stack || "(no stack)"}`,
- ` PLUGIN_ROOT: ${PLUGIN_ROOT}`,
- "",
- ].join("\n");
- process.stderr.write(entry);
- process.stdout.write("{}");
- }
-}
diff --git a/hooks/src/posttooluse-verification-observe.mts b/hooks/src/posttooluse-verification-observe.mts
deleted file mode 100644
index d5b08f1..0000000
--- a/hooks/src/posttooluse-verification-observe.mts
+++ /dev/null
@@ -1,284 +0,0 @@
-#!/usr/bin/env node
-/**
- * PostToolUse hook: verification observer for Bash tool calls.
- *
- * Maps bash commands to verification boundaries (uiRender, clientRequest,
- * serverHandler, environment) and emits structured log events for the
- * verification pipeline.
- *
- * Story inference derives the target route from recent file edits stored
- * in VERCEL_PLUGIN_RECENT_EDITS env var (set by PreToolUse), falling back
- * to extracting route hints from the command itself.
- *
- * Input: JSON on stdin with tool_name, tool_input, session_id, cwd
- * Output: JSON on stdout — {} (observer only emits log events, no additionalContext)
- */
-
-import type { SyncHookJSONOutput } from "@anthropic-ai/claude-agent-sdk";
-import { readFileSync, realpathSync } from "node:fs";
-import { resolve } from "node:path";
-import { fileURLToPath } from "node:url";
-import { pluginRoot as resolvePluginRoot, generateVerificationId } from "./hook-env.mjs";
-import { createLogger } from "./logger.mjs";
-import type { Logger } from "./logger.mjs";
-
-// ---------------------------------------------------------------------------
-// Types
-// ---------------------------------------------------------------------------
-
-export type BoundaryType =
- | "uiRender"
- | "clientRequest"
- | "serverHandler"
- | "environment"
- | "unknown";
-
-export interface VerificationBoundaryEvent {
- event: "verification.boundary_observed";
- boundary: BoundaryType;
- verificationId: string;
- command: string;
- matchedPattern: string;
- inferredRoute: string | null;
- timestamp: string;
-}
-
-export interface VerificationReport {
- type: "verification.report/v1";
- verificationId: string;
- boundaries: VerificationBoundaryEvent[];
- inferredRoute: string | null;
- storyContext: string | null;
- firstBrokenBoundary: BoundaryType | null;
-}
-
-// ---------------------------------------------------------------------------
-// Type guard
-// ---------------------------------------------------------------------------
-
-export function isVerificationReport(value: unknown): value is VerificationReport {
- if (typeof value !== "object" || value === null) return false;
- const obj = value as Record;
- return (
- obj.type === "verification.report/v1" &&
- typeof obj.verificationId === "string" &&
- Array.isArray(obj.boundaries) &&
- obj.boundaries.every(
- (b: unknown) =>
- typeof b === "object" &&
- b !== null &&
- (b as Record).event === "verification.boundary_observed",
- )
- );
-}
-
-// ---------------------------------------------------------------------------
-// Boundary pattern mapping
-// ---------------------------------------------------------------------------
-
-interface BoundaryPattern {
- boundary: BoundaryType;
- pattern: RegExp;
- label: string;
-}
-
-const BOUNDARY_PATTERNS: BoundaryPattern[] = [
- // uiRender: browser/screenshot/playwright/puppeteer commands
- { boundary: "uiRender", pattern: /\b(open|launch|browse|screenshot|puppeteer|playwright|chromium|firefox|webkit)\b/i, label: "browser-tool" },
- { boundary: "uiRender", pattern: /\bopen\s+https?:/i, label: "open-url" },
- { boundary: "uiRender", pattern: /\bnpx\s+playwright\b/i, label: "playwright-cli" },
-
- // clientRequest: curl, fetch, wget, httpie
- { boundary: "clientRequest", pattern: /\b(curl|wget|http|httpie)\b/i, label: "http-client" },
- { boundary: "clientRequest", pattern: /\bfetch\s*\(/i, label: "fetch-call" },
- { boundary: "clientRequest", pattern: /\bnpx\s+undici\b/i, label: "undici-cli" },
-
- // serverHandler: log tailing, server process inspection
- { boundary: "serverHandler", pattern: /\b(tail|less|cat)\b.*\.(log|out|err)\b/i, label: "log-tail" },
- { boundary: "serverHandler", pattern: /\b(tail\s+-f|journalctl\s+-f)\b/i, label: "log-follow" },
- { boundary: "serverHandler", pattern: /\blog(s)?\s/i, label: "log-command" },
- { boundary: "serverHandler", pattern: /\b(vercel\s+logs|vercel\s+inspect)\b/i, label: "vercel-logs" },
- { boundary: "serverHandler", pattern: /\b(lsof|netstat|ss)\s.*:(3000|3001|4000|5173|8080)\b/i, label: "port-inspect" },
-
- // environment: env reads, config inspection
- { boundary: "environment", pattern: /\b(printenv|env\b|echo\s+\$)/i, label: "env-read" },
- { boundary: "environment", pattern: /\bvercel\s+env\b/i, label: "vercel-env" },
- { boundary: "environment", pattern: /\bcat\b.*\.env\b/i, label: "dotenv-read" },
- { boundary: "environment", pattern: /\bnode\s+-e\b.*process\.env\b/i, label: "node-env" },
-];
-
-/**
- * Classify a bash command into a verification boundary type.
- */
-export function classifyBoundary(command: string): { boundary: BoundaryType; matchedPattern: string } {
- for (const bp of BOUNDARY_PATTERNS) {
- if (bp.pattern.test(command)) {
- return { boundary: bp.boundary, matchedPattern: bp.label };
- }
- }
- return { boundary: "unknown", matchedPattern: "none" };
-}
-
-// ---------------------------------------------------------------------------
-// Story inference
-// ---------------------------------------------------------------------------
-
-const ROUTE_REGEX = /\b(?:app|pages|src\/pages|src\/app)\/([\w[\].-]+(?:\/[\w[\].-]+)*)/;
-const URL_ROUTE_REGEX = /https?:\/\/[^/\s]+(\/([\w-]+(?:\/[\w-]+)*))/;
-
-/**
- * Infer the target route from recent file edits or the command itself.
- *
- * Sources (in priority order):
- * 1. VERCEL_PLUGIN_RECENT_EDITS — comma-delimited recent file paths
- * 2. Route patterns in the command (e.g., curl http://localhost:3000/settings)
- * 3. null if no route can be inferred
- */
-export function inferRoute(command: string, recentEdits?: string): string | null {
- // Source 1: recent edits
- if (recentEdits) {
- const paths = recentEdits.split(",").map((p) => p.trim()).filter(Boolean);
- for (const p of paths) {
- const match = ROUTE_REGEX.exec(p);
- if (match) {
- const route = "/" + match[1]
- .replace(/\/page\.\w+$/, "")
- .replace(/\/route\.\w+$/, "")
- .replace(/\/layout\.\w+$/, "")
- .replace(/\/loading\.\w+$/, "")
- .replace(/\/error\.\w+$/, "")
- .replace(/\[([^\]]+)\]/g, ":$1");
- return route === "/" ? "/" : route.replace(/\/$/, "");
- }
- }
- }
-
- // Source 2: URL in command
- const urlMatch = URL_ROUTE_REGEX.exec(command);
- if (urlMatch && urlMatch[1]) {
- return urlMatch[1];
- }
-
- return null;
-}
-
-// ---------------------------------------------------------------------------
-// Input parsing
-// ---------------------------------------------------------------------------
-
-export interface ParsedBashInput {
- command: string;
- sessionId: string | null;
- cwd: string | null;
-}
-
-export function parseInput(raw: string, logger?: Logger): ParsedBashInput | null {
- const trimmed = (raw || "").trim();
- if (!trimmed) return null;
-
- let input: Record;
- try {
- input = JSON.parse(trimmed);
- } catch {
- return null;
- }
-
- const toolName = (input.tool_name as string) || "";
- if (toolName !== "Bash") return null;
-
- const toolInput = (input.tool_input as Record) || {};
- const command = (toolInput.command as string) || "";
- if (!command) return null;
-
- const sessionId = (input.session_id as string) || null;
- const cwdCandidate = input.cwd ?? input.working_directory;
- const cwd = typeof cwdCandidate === "string" && cwdCandidate.trim() !== "" ? cwdCandidate : null;
-
- return { command, sessionId, cwd };
-}
-
-// ---------------------------------------------------------------------------
-// Orchestrator
-// ---------------------------------------------------------------------------
-
-export function run(rawInput?: string): string {
- const log: Logger = createLogger();
-
- let raw: string;
- if (rawInput !== undefined) {
- raw = rawInput;
- } else {
- try {
- raw = readFileSync(0, "utf-8");
- } catch {
- return "{}";
- }
- }
-
- const parsed = parseInput(raw, log);
- if (!parsed) {
- log.debug("verification-observe-skip", { reason: "no_bash_input" });
- return "{}";
- }
-
- const { command, sessionId } = parsed;
- const { boundary, matchedPattern } = classifyBoundary(command);
-
- if (boundary === "unknown") {
- log.trace("verification-observe-skip", { reason: "no_boundary_match", command: command.slice(0, 120) });
- return "{}";
- }
-
- const verificationId = generateVerificationId();
- const recentEdits = process.env.VERCEL_PLUGIN_RECENT_EDITS || "";
- const inferredRoute = inferRoute(command, recentEdits);
-
- const boundaryEvent: VerificationBoundaryEvent = {
- event: "verification.boundary_observed",
- boundary,
- verificationId,
- command: command.slice(0, 200),
- matchedPattern,
- inferredRoute,
- timestamp: new Date().toISOString(),
- };
-
- log.summary("verification.boundary_observed", boundaryEvent as unknown as Record);
-
- log.complete("verification-observe-done", {
- matchedCount: 1,
- injectedCount: 0,
- });
-
- return "{}";
-}
-
-// ---------------------------------------------------------------------------
-// Execute (only when run directly)
-// ---------------------------------------------------------------------------
-
-function isMainModule(): boolean {
- try {
- const scriptPath = realpathSync(resolve(process.argv[1] || ""));
- const modulePath = realpathSync(fileURLToPath(import.meta.url));
- return scriptPath === modulePath;
- } catch {
- return false;
- }
-}
-
-if (isMainModule()) {
- try {
- const output = run();
- process.stdout.write(output);
- } catch (err) {
- const entry = [
- `[${new Date().toISOString()}] CRASH in posttooluse-verification-observe.mts`,
- ` error: ${(err as Error)?.message || String(err)}`,
- ` stack: ${(err as Error)?.stack || "(no stack)"}`,
- "",
- ].join("\n");
- process.stderr.write(entry);
- process.stdout.write("{}");
- }
-}
diff --git a/hooks/src/pretooluse-skill-inject.mts b/hooks/src/pretooluse-skill-inject.mts
index 355199c..b1a7504 100644
--- a/hooks/src/pretooluse-skill-inject.mts
+++ b/hooks/src/pretooluse-skill-inject.mts
@@ -58,7 +58,6 @@ import { resolveVercelJsonSkills, isVercelJsonPath, VERCEL_JSON_SKILLS } from ".
import type { VercelJsonRouting } from "./vercel-config.mjs";
import { createLogger, logDecision } from "./logger.mjs";
import type { Logger } from "./logger.mjs";
-import { trackBaseEvents } from "./telemetry.mjs";
import { selectManagedContextChunk } from "./vercel-context.mjs";
const MAX_SKILLS = 3;
@@ -963,14 +962,6 @@ function run(): string {
const { toolName, toolInput, sessionId, cwd, platform, toolTarget, scopeId } = parsed;
const runtimeEnvBefore = captureRuntimeEnvSnapshot();
- // Base telemetry — enabled by default unless VERCEL_PLUGIN_TELEMETRY=off
- if (sessionId) {
- const toolEntries: Array<{ key: string; value: string }> = [
- { key: "tool_call:tool_name", value: toolName },
- ];
- trackBaseEvents(sessionId, toolEntries).catch(() => {});
- }
-
// Stage 2: loadSkills
const tSkillmap = log.active ? log.now() : 0;
const skills = loadSkills(PLUGIN_ROOT, log);
@@ -1196,21 +1187,6 @@ function run(): string {
droppedByBudget,
}, cwd);
- // Base telemetry — enabled by default unless VERCEL_PLUGIN_TELEMETRY=off
- if (sessionId) {
- const telemetryEntries: Array<{ key: string; value: string }> = [];
- for (const skill of loaded) {
- const reason = matchReasons?.[skill];
- telemetryEntries.push(
- { key: "skill:injected", value: skill },
- { key: "skill:hook", value: "PreToolUse" },
- { key: "skill:priority", value: "0" },
- { key: "skill:match_type", value: reason?.matchType ?? "unknown" },
- { key: "skill:tool_name", value: toolName },
- );
- }
- trackBaseEvents(sessionId, telemetryEntries).catch(() => {});
- }
}
return result;
diff --git a/hooks/src/pretooluse-subagent-spawn-observe.mts b/hooks/src/pretooluse-subagent-spawn-observe.mts
deleted file mode 100644
index 05d47f9..0000000
--- a/hooks/src/pretooluse-subagent-spawn-observe.mts
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/env node
-/**
- * PreToolUse hook: observes Agent tool launches and records pending
- * subagent spawn metadata for later correlation.
- *
- * Input: JSON on stdin with tool_name, tool_input, session_id, cwd
- * Output: JSON on stdout — {} (observer only, no tool mutation)
- */
-
-import { readFileSync } from "node:fs";
-import { resolve } from "node:path";
-import { fileURLToPath } from "node:url";
-import { appendPendingLaunch, type PendingLaunch } from "./subagent-state.mjs";
-import { createLogger, logCaughtError } from "./logger.mjs";
-
-const log = createLogger();
-const EMPTY_OUTPUT = "{}";
-
-function isRecord(value: unknown): value is Record {
- return typeof value === "object" && value !== null && !Array.isArray(value);
-}
-
-function resolveSessionId(input: Record, env: NodeJS.ProcessEnv): string | null {
- if (typeof input.session_id === "string" && input.session_id.trim() !== "") {
- return input.session_id;
- }
-
- if (typeof env.SESSION_ID === "string" && env.SESSION_ID.trim() !== "") {
- return env.SESSION_ID;
- }
-
- return null;
-}
-
-export function parseInput(raw: string, env: NodeJS.ProcessEnv = process.env): {
- sessionId: string;
- toolInput: Record;
-} | null {
- const trimmed = (raw || "").trim();
- if (!trimmed) {
- log.debug("pretooluse-subagent-spawn-observe-skip", { reason: "stdin_empty" });
- return null;
- }
-
- let input: Record;
- try {
- const parsed = JSON.parse(trimmed);
- if (!isRecord(parsed)) {
- log.debug("pretooluse-subagent-spawn-observe-skip", { reason: "stdin_not_object" });
- return null;
- }
- input = parsed;
- } catch {
- log.debug("pretooluse-subagent-spawn-observe-skip", { reason: "stdin_parse_fail" });
- return null;
- }
-
- const toolName = typeof input.tool_name === "string" ? input.tool_name : "";
- if (toolName !== "Agent") {
- log.debug("pretooluse-subagent-spawn-observe-skip", { reason: "unsupported_tool", toolName });
- return null;
- }
-
- const sessionId = resolveSessionId(input, env);
- if (!sessionId) {
- log.debug("pretooluse-subagent-spawn-observe-skip", { reason: "missing_session_id" });
- return null;
- }
-
- const toolInput = isRecord(input.tool_input) ? input.tool_input : {};
- return { sessionId, toolInput };
-}
-
-export function buildPendingLaunchRecord(toolInput: Record, createdAt: number): PendingLaunch {
- const resume = typeof toolInput.resume === "string" ? toolInput.resume : undefined;
- const name = typeof toolInput.name === "string" ? toolInput.name : undefined;
- const pendingLaunch: PendingLaunch = {
- description: typeof toolInput.description === "string" ? toolInput.description : "",
- prompt: typeof toolInput.prompt === "string" ? toolInput.prompt : "",
- subagent_type: typeof toolInput.subagent_type === "string" ? toolInput.subagent_type : "",
- createdAt,
- ...(resume !== undefined ? { resume } : {}),
- ...(name !== undefined ? { name } : {}),
- };
-
- return pendingLaunch;
-}
-
-export function writePendingLaunchRecord(sessionId: string, toolInput: Record): string {
- const createdAt = Date.now();
- const payload = buildPendingLaunchRecord(toolInput, createdAt);
-
- appendPendingLaunch(sessionId, payload);
-
- log.debug("pretooluse-subagent-spawn-observe-recorded", {
- sessionId,
- subagentType: typeof payload.subagent_type === "string" ? payload.subagent_type : null,
- name: typeof payload.name === "string" ? payload.name : null,
- });
-
- return sessionId;
-}
-
-export function run(rawInput?: string): string {
- let raw = rawInput;
-
- if (raw === undefined) {
- try {
- raw = readFileSync(0, "utf-8");
- } catch {
- return EMPTY_OUTPUT;
- }
- }
-
- const parsed = parseInput(raw);
- if (!parsed) {
- return EMPTY_OUTPUT;
- }
-
- try {
- writePendingLaunchRecord(parsed.sessionId, parsed.toolInput);
- } catch (error) {
- logCaughtError(log, "pretooluse-subagent-spawn-observe-write-failed", error, {
- attempted: "write_pending_launch_record",
- sessionId: parsed.sessionId,
- state: "launch_observation_failed",
- });
- }
-
- return EMPTY_OUTPUT;
-}
-
-function isMainModule(): boolean {
- const entrypoint = fileURLToPath(import.meta.url);
- return process.argv[1] ? resolve(process.argv[1]) === entrypoint : false;
-}
-
-if (isMainModule()) {
- process.stdout.write(run());
-}
diff --git a/hooks/src/session-end-cleanup.mts b/hooks/src/session-end-cleanup.mts
index ffbd200..6efe130 100644
--- a/hooks/src/session-end-cleanup.mts
+++ b/hooks/src/session-end-cleanup.mts
@@ -1,13 +1,13 @@
#!/usr/bin/env node
/**
* SessionEnd hook: best-effort cleanup of session-scoped temp files.
- * Deletes main and all agent-scoped claim dirs, session files, and profile cache.
+ * Deletes main and all agent-scoped claim dirs plus session-scoped temp files.
* Always exits successfully.
*/
import { createHash } from "node:crypto";
-import { readdirSync, readFileSync, rmSync, unlinkSync, writeFileSync } from "node:fs";
-import { homedir, tmpdir } from "node:os";
+import { readdirSync, readFileSync, rmSync, unlinkSync } from "node:fs";
+import { tmpdir } from "node:os";
import { join, resolve } from "node:path";
import { fileURLToPath } from "node:url";
@@ -66,17 +66,6 @@ function parseSessionIdFromStdin(): string | null {
}
function main(): void {
- // Convert "asked" telemetry preference to "disabled" (opt-out by default)
- try {
- const prefPath = join(homedir(), ".claude", "vercel-plugin-telemetry-preference");
- const pref = readFileSync(prefPath, "utf-8").trim();
- if (pref === "asked") {
- writeFileSync(prefPath, "disabled");
- }
- } catch {
- // File doesn't exist or can't be read — nothing to do
- }
-
const sessionId = parseSessionIdFromStdin();
if (sessionId === null) {
process.exit(0);
@@ -84,7 +73,7 @@ function main(): void {
const tempRoot = tmpdir();
const prefix = `vercel-plugin-${tempSessionIdSegment(sessionId)}-`;
- // Glob all session-scoped temp entries (main + agent-scoped claim dirs, files, profile cache)
+ // Glob all session-scoped temp entries (main + agent-scoped claim dirs, files)
let entries: string[] = [];
try {
entries = readdirSync(tempRoot).filter((name) => name.startsWith(prefix));
@@ -94,7 +83,7 @@ function main(): void {
for (const entry of entries) {
const fullPath = join(tempRoot, entry);
- if (entry.endsWith(".d") || entry.endsWith("-pending-launches")) {
+ if (entry.endsWith(".d")) {
removeDirIfPresent(fullPath);
} else {
removeFileIfPresent(fullPath);
diff --git a/hooks/src/session-start-activation.mts b/hooks/src/session-start-activation.mts
new file mode 100644
index 0000000..5b7369e
--- /dev/null
+++ b/hooks/src/session-start-activation.mts
@@ -0,0 +1,67 @@
+import { existsSync, readdirSync, type Dirent } from "node:fs";
+import { join } from "node:path";
+import { safeReadJson } from "./hook-env.mjs";
+
+interface PackageJson {
+ dependencies?: Record;
+ devDependencies?: Record;
+ scripts?: Record;
+}
+
+const ACTIVATION_MARKER_FILES: string[] = [
+ "vercel.json",
+ "next.config.js",
+ "next.config.mjs",
+ "next.config.ts",
+ "next.config.mts",
+];
+
+function readPackageJson(projectRoot: string): PackageJson | null {
+ return safeReadJson(join(projectRoot, "package.json"));
+}
+
+function packageJsonSignalsVercel(projectRoot: string): boolean {
+ const pkg = readPackageJson(projectRoot);
+ if (!pkg) return false;
+
+ const allDeps: Record = {
+ ...(pkg.dependencies || {}),
+ ...(pkg.devDependencies || {}),
+ };
+
+ if (Object.keys(allDeps).some((dep: string) =>
+ dep === "next" || dep === "vercel" || dep.startsWith("@vercel/"),
+ )) {
+ return true;
+ }
+
+ const scripts = pkg.scripts && typeof pkg.scripts === "object" ? pkg.scripts : {};
+ return Object.values(scripts).some((value: unknown) =>
+ typeof value === "string" && /\bvercel\b/.test(value),
+ );
+}
+
+export function hasSessionStartActivationMarkers(projectRoot: string): boolean {
+ if (ACTIVATION_MARKER_FILES.some((file: string) => existsSync(join(projectRoot, file)))) {
+ return true;
+ }
+
+ if (existsSync(join(projectRoot, ".vercel"))) {
+ return true;
+ }
+
+ return packageJsonSignalsVercel(projectRoot);
+}
+
+export function isGreenfieldDirectory(projectRoot: string): boolean {
+ let dirents: Dirent[];
+ try {
+ dirents = readdirSync(projectRoot, { withFileTypes: true });
+ } catch {
+ return false;
+ }
+
+ const hasNonDotDir = dirents.some((d: Dirent) => !d.name.startsWith("."));
+ const hasDotFile = dirents.some((d: Dirent) => d.name.startsWith(".") && d.isFile());
+ return !hasNonDotDir && !hasDotFile;
+}
diff --git a/hooks/src/session-start-profiler.mts b/hooks/src/session-start-profiler.mts
index 6c280c9..de3b9f0 100644
--- a/hooks/src/session-start-profiler.mts
+++ b/hooks/src/session-start-profiler.mts
@@ -16,7 +16,6 @@ import {
existsSync,
readFileSync,
readdirSync,
- writeFileSync,
type Dirent,
} from "node:fs";
import { homedir } from "node:os";
@@ -29,10 +28,11 @@ import {
setSessionEnv,
type HookPlatform,
} from "./compat.mjs";
-import { pluginRoot, profileCachePath, safeReadJson, writeSessionFile } from "./hook-env.mjs";
+import { pluginRoot, safeReadJson, writeSessionFile } from "./hook-env.mjs";
import { createLogger, logCaughtError, type Logger } from "./logger.mjs";
+import { hasSessionStartActivationMarkers } from "./session-start-activation.mjs";
import { buildSkillMap } from "./skill-map-frontmatter.mjs";
-import { trackBaseEvents, getOrCreateDeviceId } from "./telemetry.mjs";
+import { trackDauActiveToday } from "./telemetry.mjs";
// ---------------------------------------------------------------------------
// Types
@@ -620,10 +620,30 @@ async function main(): Promise {
const sessionId = normalizeSessionStartSessionId(hookInput);
const projectRoot = resolveSessionStartProjectRoot();
- logBrokenSkillFrontmatterSummary();
-
// Greenfield check — seed defaults and skip repository exploration.
const greenfield: GreenfieldResult | null = checkGreenfield(projectRoot);
+ const shouldActivate = greenfield !== null || !existsSync(projectRoot) || hasSessionStartActivationMarkers(projectRoot);
+
+ if (!shouldActivate) {
+ log.debug("session-start-profiler:skipped-non-vercel-project", {
+ projectRoot,
+ reason: "non-empty-without-vercel-markers",
+ });
+
+ if (sessionId) {
+ writeSessionFile(sessionId, SESSION_GREENFIELD_KIND, "");
+ writeSessionFile(sessionId, SESSION_LIKELY_SKILLS_KIND, "");
+ }
+
+ if (platform === "cursor") {
+ process.stdout.write(JSON.stringify(formatOutput("cursor", {})));
+ }
+
+ await trackDauActiveToday().catch(() => {});
+ process.exit(0);
+ }
+
+ logBrokenSkillFrontmatterSummary();
// Vercel CLI version check
const cliStatus: VercelCliStatus = checkVercelCli();
@@ -675,39 +695,8 @@ async function main(): Promise {
process.stdout.write(`${additionalContext}\n\n`);
}
- // Write profile cache so SubagentStart hooks can read it without re-profiling
- if (sessionId) {
- try {
- const cache = {
- projectRoot,
- likelySkills,
- greenfield: greenfield !== null,
- bootstrapHints: setupSignals.bootstrapHints,
- resourceHints: setupSignals.resourceHints,
- setupMode: setupSignals.setupMode,
- timestamp: new Date().toISOString(),
- };
- writeFileSync(profileCachePath(sessionId), JSON.stringify(cache), "utf-8");
- } catch (error) {
- logCaughtError(log, "session-start-profiler:write-profile-cache-failed", error, {
- sessionId,
- projectRoot,
- });
- }
- }
-
- // Base telemetry — enabled by default unless VERCEL_PLUGIN_TELEMETRY=off
- if (sessionId) {
- const deviceId = getOrCreateDeviceId();
- await trackBaseEvents(sessionId, [
- { key: "session:device_id", value: deviceId },
- { key: "session:platform", value: process.platform },
- { key: "session:likely_skills", value: likelySkills.join(",") },
- { key: "session:greenfield", value: String(greenfield !== null) },
- { key: "session:vercel_cli_installed", value: String(cliStatus.installed) },
- { key: "session:vercel_cli_version", value: cliStatus.currentVersion || "" },
- ]).catch(() => {});
- }
+ // DAU phone-home — enabled by default unless VERCEL_PLUGIN_TELEMETRY=off
+ await trackDauActiveToday().catch(() => {});
if (cursorOutput) {
process.stdout.write(cursorOutput);
diff --git a/hooks/src/setup-telemetry.mts b/hooks/src/setup-telemetry.mts
index 033fd7e..29d08db 100644
--- a/hooks/src/setup-telemetry.mts
+++ b/hooks/src/setup-telemetry.mts
@@ -1,42 +1,19 @@
#!/usr/bin/env node
-import { readFileSync, writeFileSync, mkdirSync } from "node:fs";
-import { homedir } from "node:os";
-import { join, dirname } from "node:path";
import { getTelemetryOverride } from "./telemetry.mjs";
-const PREF_PATH = join(homedir(), ".claude", "vercel-plugin-telemetry-preference");
-
function main(): void {
const telemetryOverride = getTelemetryOverride();
- if (telemetryOverride === "off") {
- process.stdout.write(
- "Telemetry is fully disabled via VERCEL_PLUGIN_TELEMETRY=off.\nRemove or change that env var before changing the prompt-sharing preference.\n",
- );
- process.exit(0);
- }
- let existing: string | null = null;
- try {
- existing = readFileSync(PREF_PATH, "utf-8").trim();
- } catch {
- // File doesn't exist
- }
-
- if (existing === "enabled" || existing === "disabled" || existing === "asked") {
- process.stdout.write(
- `Prompt telemetry is currently ${existing}. This setting controls prompt text sharing only.\nTo change prompt sharing, edit ~/.claude/vercel-plugin-telemetry-preference. To disable all telemetry, set VERCEL_PLUGIN_TELEMETRY=off in the environment that launches your agent.\n`,
- );
+ if (telemetryOverride === "off") {
+ process.stdout.write("Telemetry is fully disabled via VERCEL_PLUGIN_TELEMETRY=off.\n");
process.exit(0);
}
process.stdout.write(
[
- "The Vercel plugin collects anonymous usage data such as skill injection patterns and tools used by default.",
- "Ask the user if they would also like to share their prompt text to help improve skill matching. This choice only controls prompt telemetry.",
- "To disable all telemetry, set VERCEL_PLUGIN_TELEMETRY=off in the environment that launches your agent.",
- "If they agree, run `echo 'enabled' > ~/.claude/vercel-plugin-telemetry-preference`.",
- "If they decline, run `echo 'disabled' > ~/.claude/vercel-plugin-telemetry-preference`.",
+ "The default telemetry profile is a once-per-day DAU phone-home that sends only dau:active_today.",
+ "To disable all telemetry, set VERCEL_PLUGIN_TELEMETRY=off.",
"",
].join("\n"),
);
diff --git a/hooks/src/subagent-start-bootstrap.mts b/hooks/src/subagent-start-bootstrap.mts
deleted file mode 100644
index 8dc06fd..0000000
--- a/hooks/src/subagent-start-bootstrap.mts
+++ /dev/null
@@ -1,426 +0,0 @@
-#!/usr/bin/env node
-/**
- * SubagentStart hook: injects project context into spawned subagents.
- *
- * Input: JSON on stdin with { session_id, cwd, agent_id, agent_type, hook_event_name }
- * Output: JSON on stdout with { hookSpecificOutput: { hookEventName: "SubagentStart", additionalContext: "..." } } or {}
- *
- * Reads the cached profiler results from disk (profile.json) rather than
- * re-running the profiler, falling back to env var when cache is unavailable.
- *
- * Agent type budgets:
- * Explore — minimal (~1KB): project profile + top skill names only
- * Plan — light (~3KB): profile + top skill summaries + deployment constraints
- * general-purpose — standard (~8KB): profile + top skills with full bodies
- * other / custom — standard (~8KB): treat as general-purpose
- */
-
-import type { SyncHookJSONOutput } from "@anthropic-ai/claude-agent-sdk";
-import { readFileSync } from "node:fs";
-import { join, resolve } from "node:path";
-import { fileURLToPath } from "node:url";
-import { pluginRoot as resolvePluginRoot, profileCachePath, safeReadFile, safeReadJson, tryClaimSessionKey } from "./hook-env.mjs";
-import { createLogger, logCaughtError, type Logger } from "./logger.mjs";
-import { compilePromptSignals, matchPromptWithReason, normalizePromptText } from "./prompt-patterns.mjs";
-import { loadSkills } from "./pretooluse-skill-inject.mjs";
-import { extractFrontmatter } from "./skill-map-frontmatter.mjs";
-import { claimPendingLaunch } from "./subagent-state.mjs";
-
-const PLUGIN_ROOT = resolvePluginRoot();
-
-/** Budget caps per agent type category (bytes). */
-export const MINIMAL_BUDGET_BYTES = 1_024;
-export const LIGHT_BUDGET_BYTES = 3_072;
-export const STANDARD_BUDGET_BYTES = 8_000;
-
-const log: Logger = createLogger();
-
-// ---------------------------------------------------------------------------
-// Input parsing
-// ---------------------------------------------------------------------------
-
-interface SubagentStartInput {
- session_id?: string;
- cwd?: string;
- agent_id?: string;
- agent_type?: string;
- hook_event_name?: string;
-}
-
-function parseInput(): SubagentStartInput | null {
- try {
- const raw = readFileSync(0, "utf8");
- if (!raw.trim()) return null;
- return JSON.parse(raw) as SubagentStartInput;
- } catch {
- return null;
- }
-}
-
-// ---------------------------------------------------------------------------
-// Profile cache
-// ---------------------------------------------------------------------------
-
-interface ProfileCache {
- projectRoot: string;
- likelySkills: string[];
- greenfield: boolean;
- bootstrapHints: string[];
- resourceHints: string[];
- setupMode: boolean;
- timestamp: string;
-}
-
-/**
- * Read likely skills from the cached profile on disk, falling back to the
- * VERCEL_PLUGIN_LIKELY_SKILLS env var if the cache is unavailable.
- */
-function getLikelySkills(sessionId: string | undefined): string[] {
- // Try disk cache first
- if (sessionId) {
- const cache = safeReadJson(profileCachePath(sessionId));
- if (cache && Array.isArray(cache.likelySkills) && cache.likelySkills.length > 0) {
- log.debug("subagent-start-bootstrap:profile-cache-hit", { sessionId, skills: cache.likelySkills });
- return cache.likelySkills;
- }
- log.debug("subagent-start-bootstrap:profile-cache-miss", { sessionId });
- }
-
- // Fallback to env var
- const raw = process.env.VERCEL_PLUGIN_LIKELY_SKILLS;
- if (!raw || raw.trim() === "") return [];
- return raw.split(",").map((s) => s.trim()).filter(Boolean);
-}
-
-// ---------------------------------------------------------------------------
-// Budget category resolution
-// ---------------------------------------------------------------------------
-
-type BudgetCategory = "minimal" | "light" | "standard";
-
-function resolveBudgetCategory(agentType: string): BudgetCategory {
- if (agentType === "Explore") return "minimal";
- if (agentType === "Plan") return "light";
- return "standard";
-}
-
-function budgetBytesForCategory(category: BudgetCategory): number {
- switch (category) {
- case "minimal": return MINIMAL_BUDGET_BYTES;
- case "light": return LIGHT_BUDGET_BYTES;
- case "standard": return STANDARD_BUDGET_BYTES;
- }
-}
-
-interface PromptMatchedSkill {
- skill: string;
- score: number;
- priority: number;
-}
-
-function getPromptMatchedSkills(promptText: string): PromptMatchedSkill[] {
- const normalizedPrompt = normalizePromptText(promptText);
- if (!normalizedPrompt) return [];
-
- try {
- const loaded = loadSkills(PLUGIN_ROOT, log);
- if (!loaded) return [];
-
- const matches: PromptMatchedSkill[] = [];
- for (const [skill, config] of Object.entries(loaded.skillMap)) {
- if (!config.promptSignals) continue;
-
- const compiled = compilePromptSignals(config.promptSignals);
- const result = matchPromptWithReason(normalizedPrompt, compiled);
- if (!result.matched) continue;
-
- matches.push({
- skill,
- score: result.score,
- priority: config.priority,
- });
- }
-
- matches.sort((left, right) => {
- if (right.score !== left.score) return right.score - left.score;
- if (right.priority !== left.priority) return right.priority - left.priority;
- return left.skill.localeCompare(right.skill);
- });
-
- log.debug("subagent-start-bootstrap:prompt-skill-match", {
- promptLength: promptText.length,
- matchedSkills: matches.map(({ skill, score }) => ({ skill, score })),
- });
- return matches;
- } catch (error) {
- logCaughtError(log, "subagent-start-bootstrap:prompt-skill-match-failed", error, {
- promptLength: promptText.length,
- });
- return [];
- }
-}
-
-function mergeLikelySkills(likelySkills: string[], promptMatchedSkills: PromptMatchedSkill[]): string[] {
- if (promptMatchedSkills.length === 0) return likelySkills;
- const promptSkillNames = promptMatchedSkills.map((entry) => entry.skill);
- return [...new Set([...promptSkillNames, ...likelySkills])];
-}
-
-function resolveLikelySkillsFromPendingLaunch(
- sessionId: string | undefined,
- agentType: string,
- likelySkills: string[],
-): string[] {
- if (!sessionId) return likelySkills;
-
- try {
- const pendingLaunch = claimPendingLaunch(sessionId, agentType);
- if (!pendingLaunch) {
- log.debug("subagent-start-bootstrap:pending-launch", {
- sessionId,
- agentType,
- claimedLaunch: false,
- likelySkills,
- });
- return likelySkills;
- }
-
- const promptText = `${pendingLaunch.description} ${pendingLaunch.prompt}`.trim();
- const promptMatchedSkills = getPromptMatchedSkills(promptText);
- const effectiveLikelySkills = mergeLikelySkills(likelySkills, promptMatchedSkills);
-
- log.debug("subagent-start-bootstrap:pending-launch", {
- sessionId,
- agentType,
- claimedLaunch: true,
- promptMatchedSkills: promptMatchedSkills.map(({ skill, score }) => ({ skill, score })),
- likelySkills: effectiveLikelySkills,
- });
-
- return effectiveLikelySkills;
- } catch (error) {
- logCaughtError(log, "subagent-start-bootstrap:pending-launch-route-failed", error, {
- sessionId,
- agentType,
- likelySkills,
- });
- return likelySkills;
- }
-}
-
-// ---------------------------------------------------------------------------
-// Context assembly
-// ---------------------------------------------------------------------------
-
-function profileLine(agentType: string, likelySkills: string[]): string {
- return "Vercel plugin active. Project likely uses: " + (likelySkills.length > 0 ? likelySkills.join(", ") : "unknown stack") + ".";
-}
-
-/**
- * Build minimal context (~1KB): project profile + skill name list.
- * Used for Explore agents that only need orientation.
- */
-function buildMinimalContext(agentType: string, likelySkills: string[]): string {
- const parts: string[] = [];
- parts.push(``);
- parts.push(profileLine(agentType, likelySkills));
- parts.push("");
- return parts.join("\n");
-}
-
-/**
- * Build light context (~3KB): profile + skill summaries + deployment constraints.
- * Used for Plan agents that need enough context to architect solutions.
- */
-function buildLightContext(agentType: string, likelySkills: string[], budgetBytes: number): string {
- const parts: string[] = [];
- parts.push(``);
- parts.push(profileLine(agentType, likelySkills));
-
- let usedBytes = Buffer.byteLength(parts.join("\n"), "utf8");
-
- // Add skill summaries
- const loaded = loadSkills(PLUGIN_ROOT, log);
- if (loaded) {
- for (const skill of likelySkills) {
- const config = loaded.skillMap[skill];
- if (!config) continue;
- const summary = config.summary;
- if (!summary) continue;
-
- const line = `- **${skill}**: ${summary}`;
- const lineBytes = Buffer.byteLength(line, "utf8");
- if (usedBytes + lineBytes + 1 > budgetBytes) break;
- parts.push(line);
- usedBytes += lineBytes + 1;
- }
- }
-
- // Add deployment constraints if budget allows
- const constraints = [
- "Deployment targets Vercel. Use framework conventions (e.g. Next.js app router, API routes).",
- "Environment variables are managed via `vercel env`. Do not hardcode secrets.",
- ];
- for (const constraint of constraints) {
- const lineBytes = Buffer.byteLength(constraint, "utf8");
- if (usedBytes + lineBytes + 1 > budgetBytes) break;
- parts.push(constraint);
- usedBytes += lineBytes + 1;
- }
-
- parts.push("");
- return parts.join("\n");
-}
-
-/**
- * Build standard context (~8KB): profile + top skill full bodies.
- * Used for general-purpose agents that need actionable skill content.
- */
-function buildStandardContext(agentType: string, likelySkills: string[], budgetBytes: number): string {
- const parts: string[] = [];
- parts.push(``);
- parts.push(profileLine(agentType, likelySkills));
-
- let usedBytes = Buffer.byteLength(parts.join("\n"), "utf8");
-
- // Load skill map once for summary fallbacks
- const loaded = loadSkills(PLUGIN_ROOT, log);
-
- // Inject full skill bodies for likely skills, falling back to summaries
- for (const skill of likelySkills) {
- const skillPath = join(PLUGIN_ROOT, "skills", skill, "SKILL.md");
- const raw = safeReadFile(skillPath);
- if (raw !== null) {
- const { body } = extractFrontmatter(raw);
- const content = body.trimStart();
- const wrapped = `\n${content}\n`;
- const byteLen = Buffer.byteLength(wrapped, "utf8");
-
- if (usedBytes + byteLen + 1 <= budgetBytes) {
- parts.push(wrapped);
- usedBytes += byteLen + 1;
- continue;
- }
- }
-
- // Fallback to summary if full body doesn't fit or file is missing
- const summary = loaded?.skillMap[skill]?.summary;
- if (summary) {
- const line = `\n${summary}\n`;
- const lineBytes = Buffer.byteLength(line, "utf8");
- if (usedBytes + lineBytes + 1 <= budgetBytes) {
- parts.push(line);
- usedBytes += lineBytes + 1;
- }
- }
- }
-
- parts.push("");
- return parts.join("\n");
-}
-
-// ---------------------------------------------------------------------------
-// Main
-// ---------------------------------------------------------------------------
-
-function main(): void {
- const input = parseInput();
- if (!input) {
- process.exit(0);
- }
-
- const agentId = input.agent_id ?? "unknown";
- const agentType = input.agent_type ?? "unknown";
- const sessionId = input.session_id;
-
- log.debug("subagent-start-bootstrap", { agentId, agentType, sessionId });
-
- const profilerLikelySkills = getLikelySkills(sessionId);
- const likelySkills = resolveLikelySkillsFromPendingLaunch(
- sessionId,
- agentType,
- profilerLikelySkills,
- );
-
- const category = resolveBudgetCategory(agentType);
- const maxBytes = budgetBytesForCategory(category);
-
- let context: string;
- switch (category) {
- case "minimal":
- context = buildMinimalContext(agentType, likelySkills);
- break;
- case "light":
- context = buildLightContext(agentType, likelySkills, maxBytes);
- break;
- case "standard":
- context = buildStandardContext(agentType, likelySkills, maxBytes);
- break;
- }
-
- // Hard-truncate if over budget (safety net)
- if (Buffer.byteLength(context, "utf8") > maxBytes) {
- context = Buffer.from(context, "utf8").subarray(0, maxBytes).toString("utf8");
- }
-
- // Persist dedup claims so PreToolUse won't re-inject the same skills.
- // Scope claims by agentId so sibling subagents don't cross-contaminate.
- const scopeId = agentId !== "unknown" ? agentId : undefined;
- if (sessionId && likelySkills.length > 0) {
- const claimed: string[] = [];
- for (const skill of likelySkills) {
- if (tryClaimSessionKey(sessionId, "seen-skills", skill, scopeId)) {
- claimed.push(skill);
- }
- }
- if (claimed.length > 0) {
- log.debug("subagent-start-bootstrap:dedup-claims", { sessionId, agentId, scopeId, claimed });
- }
- }
-
- const budgetUsed = Buffer.byteLength(context, "utf8");
-
- // Determine whether a pending launch was matched (profiler vs pending-launch divergence)
- const pendingLaunchMatched = likelySkills.length !== profilerLikelySkills.length
- || likelySkills.some((s) => !profilerLikelySkills.includes(s));
-
- log.summary("subagent-start-bootstrap:complete", {
- agent_id: agentId,
- agent_type: agentType,
- claimed_skills: likelySkills.length,
- budget_used: budgetUsed,
- budget_max: maxBytes,
- budget_category: category,
- pending_launch_matched: pendingLaunchMatched,
- });
-
- const output: SyncHookJSONOutput = {
- hookSpecificOutput: {
- hookEventName: "SubagentStart",
- additionalContext: context,
- },
- };
-
- process.stdout.write(JSON.stringify(output));
- process.exit(0);
-}
-
-const ENTRYPOINT = fileURLToPath(import.meta.url);
-const isEntrypoint = process.argv[1]
- ? resolve(process.argv[1]) === ENTRYPOINT
- : false;
-
-if (isEntrypoint) {
- main();
-}
-
-// Exports for testing
-export {
- parseInput,
- buildMinimalContext,
- buildLightContext,
- buildStandardContext,
- getLikelySkills,
- main,
-};
-export type { SubagentStartInput, ProfileCache, BudgetCategory };
diff --git a/hooks/src/subagent-state.mts b/hooks/src/subagent-state.mts
deleted file mode 100644
index a659fcb..0000000
--- a/hooks/src/subagent-state.mts
+++ /dev/null
@@ -1,321 +0,0 @@
-import { createHash } from "node:crypto";
-import {
- appendFileSync,
- closeSync,
- mkdirSync,
- openSync,
- readFileSync,
- renameSync,
- rmSync,
- statSync,
- writeFileSync,
-} from "node:fs";
-import { tmpdir } from "node:os";
-import { dirname, join, resolve } from "node:path";
-import * as hookEnvNs from "./hook-env.mjs";
-import { createLogger, logCaughtError, type Logger } from "./logger.mjs";
-
-export type PendingLaunch = {
- description: string;
- prompt: string;
- subagent_type: string;
- resume?: string;
- name?: string;
- createdAt: number;
-};
-
-const PENDING_LAUNCH_TTL_MS = 60_000;
-const LOCK_WAIT_TIMEOUT_MS = 2_000;
-const LOCK_WAIT_INTERVAL_MS = 10;
-const LOCK_STALE_MS = 5_000;
-
-type HookEnvWithTmpDir = typeof hookEnvNs & {
- getTmpDir?: () => string;
-};
-
-const hookEnv = hookEnvNs as HookEnvWithTmpDir;
-const log: Logger = createLogger();
-
-function isNodeErrorCode(error: unknown, code: string): boolean {
- return (
- typeof error === "object" &&
- error !== null &&
- "code" in error &&
- (error as { code?: string }).code === code
- );
-}
-
-function sleepMs(ms: number): void {
- const deadline = Date.now() + ms;
- while (Date.now() < deadline) {
- // Busy-wait for short lock contention windows.
- }
-}
-
-function resolveTmpRoot(): string {
- try {
- const tempRoot = hookEnv.getTmpDir?.();
- if (typeof tempRoot === "string" && tempRoot.trim() !== "") {
- return resolve(tempRoot);
- }
- } catch (error) {
- logCaughtError(log, "subagent-state:get-tmp-dir-failed", error, {});
- }
-
- return resolve(tmpdir());
-}
-
-function pendingLaunchPath(sessionId: string): string {
- return join(resolveTmpRoot(), `vercel-plugin-${sessionId}-pending-launches.jsonl`);
-}
-
-function pendingLaunchLockPath(sessionId: string): string {
- return `${pendingLaunchPath(sessionId)}.lock`;
-}
-
-function agentStatePath(sessionId: string, agentId: string): string {
- const agentHash = createHash("sha256").update(agentId).digest("hex");
- return join(resolveTmpRoot(), `vercel-plugin-${sessionId}-agent-${agentHash}.json`);
-}
-
-function maybeClearStaleLock(lockPath: string, context: Record): void {
- try {
- const stats = statSync(lockPath);
- if (Date.now() - stats.mtimeMs > LOCK_STALE_MS) {
- rmSync(lockPath, { force: true });
- log.debug("subagent-state:stale-lock-cleared", { lockPath, ...context });
- }
- } catch (error) {
- if (!isNodeErrorCode(error, "ENOENT")) {
- logCaughtError(log, "subagent-state:stale-lock-check-failed", error, { lockPath, ...context });
- }
- }
-}
-
-function acquireLock(lockPath: string, context: Record): boolean {
- mkdirSync(dirname(lockPath), { recursive: true });
-
- const deadline = Date.now() + LOCK_WAIT_TIMEOUT_MS;
- while (Date.now() <= deadline) {
- try {
- const fd = openSync(lockPath, "wx");
- closeSync(fd);
- return true;
- } catch (error) {
- if (isNodeErrorCode(error, "EEXIST")) {
- maybeClearStaleLock(lockPath, context);
- sleepMs(LOCK_WAIT_INTERVAL_MS);
- continue;
- }
-
- logCaughtError(log, "subagent-state:acquire-lock-failed", error, { lockPath, ...context });
- return false;
- }
- }
-
- log.debug("subagent-state:lock-timeout", { lockPath, ...context });
- return false;
-}
-
-function releaseLock(lockPath: string, context: Record): void {
- try {
- rmSync(lockPath, { force: true });
- } catch (error) {
- logCaughtError(log, "subagent-state:release-lock-failed", error, { lockPath, ...context });
- }
-}
-
-function withLock(
- lockPath: string,
- context: Record,
- fallback: T,
- action: () => T,
-): T {
- if (!acquireLock(lockPath, context)) {
- return fallback;
- }
-
- try {
- return action();
- } finally {
- releaseLock(lockPath, context);
- }
-}
-
-function isPendingLaunch(value: unknown): value is PendingLaunch {
- if (typeof value !== "object" || value === null || Array.isArray(value)) {
- return false;
- }
-
- const record = value as Record;
- if (
- typeof record.description !== "string" ||
- typeof record.prompt !== "string" ||
- typeof record.subagent_type !== "string" ||
- typeof record.createdAt !== "number" ||
- !Number.isFinite(record.createdAt)
- ) {
- return false;
- }
-
- if ("resume" in record && typeof record.resume !== "string" && typeof record.resume !== "undefined") {
- return false;
- }
-
- if ("name" in record && typeof record.name !== "string" && typeof record.name !== "undefined") {
- return false;
- }
-
- return true;
-}
-
-function parsePendingLaunchLine(line: string, filePath: string): PendingLaunch | null {
- if (line.trim() === "") return null;
-
- try {
- const parsed = JSON.parse(line) as unknown;
- if (isPendingLaunch(parsed)) {
- return parsed;
- }
-
- log.debug("subagent-state:invalid-pending-launch-record", { filePath, line });
- return null;
- } catch (error) {
- logCaughtError(log, "subagent-state:parse-pending-launch-line-failed", error, { filePath, line });
- return null;
- }
-}
-
-function readPendingLaunchFile(filePath: string): PendingLaunch[] {
- try {
- const content = readFileSync(filePath, "utf-8");
- return content
- .split("\n")
- .map((line) => parsePendingLaunchLine(line, filePath))
- .filter((launch): launch is PendingLaunch => launch !== null);
- } catch (error) {
- if (isNodeErrorCode(error, "ENOENT")) {
- return [];
- }
-
- logCaughtError(log, "subagent-state:read-pending-launch-file-failed", error, { filePath });
- return [];
- }
-}
-
-function isPendingLaunchExpired(launch: PendingLaunch, now: number): boolean {
- return now - launch.createdAt > PENDING_LAUNCH_TTL_MS;
-}
-
-function serializePendingLaunches(launches: PendingLaunch[]): string {
- if (launches.length === 0) {
- return "";
- }
-
- return `${launches.map((launch) => JSON.stringify(launch)).join("\n")}\n`;
-}
-
-function writeFileAtomically(path: string, content: string, context: Record): void {
- const tempPath = `${path}.${process.pid}.${Date.now()}.tmp`;
- try {
- mkdirSync(dirname(path), { recursive: true });
- writeFileSync(tempPath, content, "utf-8");
- renameSync(tempPath, path);
- } catch (error) {
- logCaughtError(log, "subagent-state:atomic-write-failed", error, { path, tempPath, ...context });
- try {
- rmSync(tempPath, { force: true });
- } catch {}
- }
-}
-
-export function listPendingLaunches(sessionId: string): PendingLaunch[] {
- const now = Date.now();
- return readPendingLaunchFile(pendingLaunchPath(sessionId))
- .filter((launch) => !isPendingLaunchExpired(launch, now))
- .sort((left, right) => left.createdAt - right.createdAt);
-}
-
-export function claimPendingLaunch(sessionId: string, agentType: string): PendingLaunch | null {
- const filePath = pendingLaunchPath(sessionId);
- const lockPath = pendingLaunchLockPath(sessionId);
-
- return withLock(lockPath, { sessionId, agentType, filePath, operation: "claim" }, null, () => {
- const now = Date.now();
- const launches = readPendingLaunchFile(filePath);
- const activeLaunches = launches.filter((launch) => !isPendingLaunchExpired(launch, now));
- const hadExpiredLaunches = activeLaunches.length !== launches.length;
-
- let claimedLaunch: PendingLaunch | null = null;
- let claimedIndex = -1;
-
- for (const [index, launch] of activeLaunches.entries()) {
- if (launch.subagent_type !== agentType) {
- continue;
- }
-
- if (claimedLaunch === null || launch.createdAt < claimedLaunch.createdAt) {
- claimedLaunch = launch;
- claimedIndex = index;
- }
- }
-
- if (claimedIndex >= 0) {
- activeLaunches.splice(claimedIndex, 1);
- }
-
- if (claimedLaunch !== null || hadExpiredLaunches) {
- writeFileAtomically(filePath, serializePendingLaunches(activeLaunches), {
- sessionId,
- agentType,
- filePath,
- claimed: claimedLaunch !== null,
- });
- }
-
- return claimedLaunch;
- });
-}
-
-export function appendPendingLaunch(sessionId: string, launch: PendingLaunch): void {
- const filePath = pendingLaunchPath(sessionId);
- const lockPath = pendingLaunchLockPath(sessionId);
-
- withLock(lockPath, { sessionId, filePath, operation: "append" }, undefined, () => {
- try {
- mkdirSync(dirname(filePath), { recursive: true });
- appendFileSync(filePath, `${JSON.stringify(launch)}\n`, "utf-8");
- } catch (error) {
- logCaughtError(log, "subagent-state:append-pending-launch-failed", error, { sessionId, filePath });
- }
- });
-}
-
-export function readAgentState(sessionId: string, agentId: string): Record {
- const filePath = agentStatePath(sessionId, agentId);
-
- try {
- const content = readFileSync(filePath, "utf-8").trim();
- if (content === "") {
- return {};
- }
-
- const parsed = JSON.parse(content) as unknown;
- if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
- return parsed as Record;
- }
-
- log.debug("subagent-state:agent-state-not-object", { filePath, agentId, sessionId });
- return {};
- } catch (error) {
- if (!isNodeErrorCode(error, "ENOENT")) {
- logCaughtError(log, "subagent-state:read-agent-state-failed", error, { filePath, agentId, sessionId });
- }
- return {};
- }
-}
-
-export function writeAgentState(sessionId: string, agentId: string, state: Record): void {
- const filePath = agentStatePath(sessionId, agentId);
- writeFileAtomically(filePath, `${JSON.stringify(state)}\n`, { sessionId, agentId, filePath });
-}
diff --git a/hooks/src/subagent-stop-sync.mts b/hooks/src/subagent-stop-sync.mts
deleted file mode 100644
index cccc65a..0000000
--- a/hooks/src/subagent-stop-sync.mts
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/usr/bin/env node
-/**
- * SubagentStop hook: writes agent metadata to an aggregate ledger file
- * for observability.
- *
- * Input: JSON on stdin with { session_id, cwd, agent_id, agent_type,
- * agent_transcript_path, last_assistant_message, hook_event_name }
- * Output: empty (no stdout output needed)
- *
- * Appends a JSONL record to /vercel-plugin--subagent-ledger.jsonl
- * so the session-end-cleanup hook (or external tools) can inspect subagent history.
- */
-
-import { appendFileSync, mkdirSync } from "node:fs";
-import { readFileSync } from "node:fs";
-import { resolve } from "node:path";
-import { tmpdir } from "node:os";
-import { dirname } from "node:path";
-import { fileURLToPath } from "node:url";
-import { listSessionKeys } from "./hook-env.mjs";
-import { createLogger, logCaughtError, type Logger } from "./logger.mjs";
-
-const log: Logger = createLogger();
-
-// ---------------------------------------------------------------------------
-// Input parsing
-// ---------------------------------------------------------------------------
-
-interface SubagentStopInput {
- session_id?: string;
- cwd?: string;
- agent_id?: string;
- agent_type?: string;
- agent_transcript_path?: string;
- last_assistant_message?: string;
- hook_event_name?: string;
-}
-
-function parseInput(): SubagentStopInput | null {
- try {
- const raw = readFileSync(0, "utf8");
- if (!raw.trim()) return null;
- return JSON.parse(raw) as SubagentStopInput;
- } catch {
- return null;
- }
-}
-
-// ---------------------------------------------------------------------------
-// Ledger
-// ---------------------------------------------------------------------------
-
-interface LedgerEntry {
- timestamp: string;
- session_id: string;
- agent_id: string;
- agent_type: string;
- agent_transcript_path?: string;
-}
-
-function ledgerPath(sessionId: string): string {
- return resolve(tmpdir(), `vercel-plugin-${sessionId}-subagent-ledger.jsonl`);
-}
-
-function appendLedger(entry: LedgerEntry): void {
- const path = ledgerPath(entry.session_id);
- try {
- appendFileSync(path, JSON.stringify(entry) + "\n", "utf-8");
- } catch (error) {
- logCaughtError(log, "subagent-stop-sync:append-ledger-failed", error, { path });
- }
-}
-
-// ---------------------------------------------------------------------------
-// Main
-// ---------------------------------------------------------------------------
-
-function main(): void {
- const input = parseInput();
- if (!input) {
- process.exit(0);
- }
-
- const sessionId = input.session_id;
- if (!sessionId) {
- process.exit(0);
- }
-
- const agentId = input.agent_id ?? "unknown";
- const agentType = input.agent_type ?? "unknown";
-
- log.debug("subagent-stop-sync", { sessionId, agentId, agentType });
-
- let ledgerEntryWritten = false;
- try {
- appendLedger({
- timestamp: new Date().toISOString(),
- session_id: sessionId,
- agent_id: agentId,
- agent_type: agentType,
- agent_transcript_path: input.agent_transcript_path,
- });
- ledgerEntryWritten = true;
- } catch (error) {
- logCaughtError(log, "subagent-stop-sync:ledger-write-failed", error, {
- sessionId,
- agentId,
- });
- }
-
- // Count skills injected for this agent by reading the scoped claim dir
- let skillsInjected = 0;
- try {
- const claimed = listSessionKeys(sessionId, "seen-skills", agentId !== "unknown" ? agentId : undefined);
- skillsInjected = claimed.length;
- } catch {
- // Non-critical — claim dir may not exist if no skills were injected
- }
-
- log.summary("subagent-stop-sync:complete", {
- agent_id: agentId,
- agent_type: agentType,
- skills_injected: skillsInjected,
- ledger_entry_written: ledgerEntryWritten,
- });
-
- process.exit(0);
-}
-
-const ENTRYPOINT = fileURLToPath(import.meta.url);
-const isEntrypoint = process.argv[1]
- ? resolve(process.argv[1]) === ENTRYPOINT
- : false;
-
-if (isEntrypoint) {
- main();
-}
-
-// Exports for testing
-export { parseInput, appendLedger, ledgerPath, main };
-export type { SubagentStopInput, LedgerEntry };
diff --git a/hooks/src/telemetry.mts b/hooks/src/telemetry.mts
index 50e375b..f1c42bd 100644
--- a/hooks/src/telemetry.mts
+++ b/hooks/src/telemetry.mts
@@ -1,15 +1,12 @@
import { randomUUID } from "node:crypto";
-import { mkdirSync, readFileSync, writeFileSync } from "node:fs";
+import { mkdirSync, statSync, writeFileSync } from "node:fs";
import { join, dirname } from "node:path";
import { homedir } from "node:os";
-const MAX_VALUE_BYTES = 100_000;
-const TRUNCATION_SUFFIX = "[TRUNCATED]";
-
const BRIDGE_ENDPOINT = "https://telemetry.vercel.com/api/vercel-plugin/v1/events";
const FLUSH_TIMEOUT_MS = 3_000;
-const DEVICE_ID_PATH = join(homedir(), ".claude", "vercel-plugin-device-id");
+const DAU_STAMP_PATH = join(homedir(), ".config", "vercel-plugin", "dau-stamp");
export interface TelemetryEvent {
id: string;
@@ -18,72 +15,64 @@ export interface TelemetryEvent {
value: string;
}
-function truncateValue(value: string): string {
- if (Buffer.byteLength(value, "utf-8") <= MAX_VALUE_BYTES) {
- return value;
- }
- const truncated = Buffer.from(value, "utf-8").subarray(0, MAX_VALUE_BYTES).toString("utf-8");
- return truncated + TRUNCATION_SUFFIX;
-}
-
-async function send(sessionId: string, events: TelemetryEvent[]): Promise {
- if (events.length === 0) return;
+async function sendDau(events: TelemetryEvent[]): Promise {
+ if (events.length === 0) return false;
const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), FLUSH_TIMEOUT_MS);
try {
- await fetch(BRIDGE_ENDPOINT, {
+ const response = await fetch(BRIDGE_ENDPOINT, {
method: "POST",
headers: {
"Content-Type": "application/json",
- "x-vercel-plugin-session-id": sessionId,
- "x-vercel-plugin-topic-id": "generic",
+ "x-vercel-plugin-topic-id": "dau",
},
body: JSON.stringify(events),
signal: controller.signal,
});
+ return response.ok;
} catch {
- // Best-effort
+ return false;
} finally {
clearTimeout(timeout);
}
}
// ---------------------------------------------------------------------------
-// Device ID — stable anonymous identifier per machine (always-on)
+// DAU stamp — local once-per-day throttle (always-on unless opted out)
// ---------------------------------------------------------------------------
-/**
- * Returns a stable anonymous device ID. Creates one on first call.
- * The ID is a random UUID stored at ~/.claude/vercel-plugin-device-id
- * and is not tied to any user account or PII.
- */
-export function getOrCreateDeviceId(): string {
+export function getDauStampPath(): string {
+ return DAU_STAMP_PATH;
+}
+
+function utcDayStamp(date: Date): string {
+ return date.toISOString().slice(0, 10);
+}
+
+export function shouldSendDauPing(now: Date = new Date()): boolean {
try {
- const existing = readFileSync(DEVICE_ID_PATH, "utf-8").trim();
- if (existing.length > 0) return existing;
+ const existingMtime = statSync(DAU_STAMP_PATH).mtime;
+ return utcDayStamp(existingMtime) !== utcDayStamp(now);
} catch {
- // File doesn't exist yet
+ return true;
}
+}
- const deviceId = randomUUID();
+export function markDauPingSent(now: Date = new Date()): void {
+ void now;
try {
- mkdirSync(dirname(DEVICE_ID_PATH), { recursive: true });
- writeFileSync(DEVICE_ID_PATH, deviceId);
+ mkdirSync(dirname(DAU_STAMP_PATH), { recursive: true });
+ writeFileSync(DAU_STAMP_PATH, "", { flag: "w" });
} catch {
- // Best-effort — return the generated ID even if we can't persist it
+ // Best-effort
}
- return deviceId;
}
// ---------------------------------------------------------------------------
-// Telemetry tiers
+// Telemetry controls
// ---------------------------------------------------------------------------
-/**
- * Content-level telemetry (opt-in): requires explicit user consent.
- * Currently gates prompt:text only.
- */
export function getTelemetryOverride(env: NodeJS.ProcessEnv = process.env): "off" | null {
const value = env.VERCEL_PLUGIN_TELEMETRY?.trim().toLowerCase();
if (value === "off") return value;
@@ -91,115 +80,29 @@ export function getTelemetryOverride(env: NodeJS.ProcessEnv = process.env): "off
}
/**
- * Base telemetry is enabled by default, but users can disable all telemetry
- * with VERCEL_PLUGIN_TELEMETRY=off.
+ * DAU telemetry is enabled by default, but users can disable all telemetry with
+ * VERCEL_PLUGIN_TELEMETRY=off.
*/
-export function isBaseTelemetryEnabled(env: NodeJS.ProcessEnv = process.env): boolean {
+export function isDauTelemetryEnabled(env: NodeJS.ProcessEnv = process.env): boolean {
return getTelemetryOverride(env) !== "off";
}
-/**
- * Content-level telemetry (opt-in): requires explicit user consent.
- * VERCEL_PLUGIN_TELEMETRY=off disables it entirely.
- */
-export function isContentTelemetryEnabled(env: NodeJS.ProcessEnv = process.env): boolean {
- const override = getTelemetryOverride(env);
- if (override === "off") return false;
-
- try {
- const prefPath = join(homedir(), ".claude", "vercel-plugin-telemetry-preference");
- const pref = readFileSync(prefPath, "utf-8").trim();
- return pref === "enabled";
- } catch {
- return false;
- }
-}
-
-/**
- * Backward-compatible alias for older callers that still refer to prompt telemetry.
- */
-export function isPromptTelemetryEnabled(env: NodeJS.ProcessEnv = process.env): boolean {
- return isContentTelemetryEnabled(env);
-}
-
// ---------------------------------------------------------------------------
-// Always-on base telemetry (session, tool, skill injection events)
+// DAU telemetry (default-on, opt-out via VERCEL_PLUGIN_TELEMETRY=off)
// ---------------------------------------------------------------------------
-export async function trackBaseEvent(sessionId: string, key: string, value: string): Promise {
- if (!isBaseTelemetryEnabled()) return;
-
- const event: TelemetryEvent = {
- id: randomUUID(),
- event_time: Date.now(),
- key,
- value: truncateValue(value),
- };
-
- await send(sessionId, [event]);
-}
-
-export async function trackBaseEvents(
- sessionId: string,
- entries: Array<{ key: string; value: string }>,
-): Promise {
- if (!isBaseTelemetryEnabled() || entries.length === 0) return;
+export async function trackDauActiveToday(now: Date = new Date()): Promise {
+ if (!isDauTelemetryEnabled() || !shouldSendDauPing(now)) return;
- const now = Date.now();
- const events: TelemetryEvent[] = entries.map((entry) => ({
+ const eventTime = now.getTime();
+ const sent = await sendDau([{
id: randomUUID(),
- event_time: now,
- key: entry.key,
- value: truncateValue(entry.value),
- }));
+ event_time: eventTime,
+ key: "dau:active_today",
+ value: "1",
+ }]);
- await send(sessionId, events);
-}
-
-// ---------------------------------------------------------------------------
-// Opt-in telemetry (raw prompt content)
-// ---------------------------------------------------------------------------
-
-export async function trackContentEvent(sessionId: string, key: string, value: string): Promise {
- if (!isContentTelemetryEnabled()) return;
-
- const event: TelemetryEvent = {
- id: randomUUID(),
- event_time: Date.now(),
- key,
- value: truncateValue(value),
- };
-
- await send(sessionId, [event]);
-}
-
-export async function trackContentEvents(
- sessionId: string,
- entries: Array<{ key: string; value: string }>,
-): Promise {
- if (!isContentTelemetryEnabled() || entries.length === 0) return;
-
- const now = Date.now();
- const events: TelemetryEvent[] = entries.map((entry) => ({
- id: randomUUID(),
- event_time: now,
- key: entry.key,
- value: truncateValue(entry.value),
- }));
-
- await send(sessionId, events);
-}
-
-/**
- * Backward-compatible aliases for older callers that still refer to prompt telemetry.
- */
-export async function trackEvent(sessionId: string, key: string, value: string): Promise {
- await trackContentEvent(sessionId, key, value);
-}
-
-export async function trackEvents(
- sessionId: string,
- entries: Array<{ key: string; value: string }>,
-): Promise {
- await trackContentEvents(sessionId, entries);
+ if (sent) {
+ markDauPingSent(now);
+ }
}
diff --git a/hooks/src/user-prompt-submit-skill-inject.mts b/hooks/src/user-prompt-submit-skill-inject.mts
index 81c12d3..0c9f3a4 100644
--- a/hooks/src/user-prompt-submit-skill-inject.mts
+++ b/hooks/src/user-prompt-submit-skill-inject.mts
@@ -47,7 +47,6 @@ import { analyzePrompt } from "./prompt-analysis.mjs";
import type { PromptAnalysisReport } from "./prompt-analysis.mjs";
import { createLogger, logDecision } from "./logger.mjs";
import type { Logger } from "./logger.mjs";
-import { trackBaseEvents } from "./telemetry.mjs";
import { selectManagedContextChunk } from "./vercel-context.mjs";
const MAX_SKILLS = 2;
@@ -838,9 +837,6 @@ export function run(): string {
const { prompt, sessionId, cwd } = parsed;
const promptEnvBefore = capturePromptEnvSnapshot();
- // prompt:text telemetry is handled by user-prompt-submit-telemetry.mts
- // where it is awaited before process.exit(), ensuring reliable delivery.
-
const normalizedPrompt = normalizePromptText(prompt);
if (!normalizedPrompt) {
@@ -1050,20 +1046,7 @@ export function run(): string {
droppedByCap,
droppedByBudget,
}, cwd);
- }
- // Base telemetry — enabled by default unless VERCEL_PLUGIN_TELEMETRY=off
- if (sessionId && loaded.length > 0) {
- const telemetryEntries: Array<{ key: string; value: string }> = [];
- for (const skill of loaded) {
- const r = report.perSkillResults[skill];
- telemetryEntries.push(
- { key: "prompt:skill", value: skill },
- { key: "prompt:score", value: String(r?.score ?? 0) },
- { key: "prompt:hook", value: "UserPromptSubmit" },
- );
- }
- trackBaseEvents(sessionId, telemetryEntries).catch(() => {});
}
let outputEnv: Record | undefined;
diff --git a/hooks/src/user-prompt-submit-telemetry.mts b/hooks/src/user-prompt-submit-telemetry.mts
deleted file mode 100644
index 50944aa..0000000
--- a/hooks/src/user-prompt-submit-telemetry.mts
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/env node
-/**
- * UserPromptSubmit hook: prompt telemetry opt-in + prompt text tracking.
- *
- * Fires on every user message. Two responsibilities:
- *
- * 1. Track prompt:text telemetry (awaited) for every prompt >= 10 chars
- * when prompt telemetry is enabled. This runs independently of skill
- * matching so prompts are never silently dropped.
- *
- * 2. On the first message of a session where the user hasn't recorded a
- * prompt telemetry preference, return additionalContext asking the model
- * to prompt the user for opt-in. Writes "asked" immediately so the user
- * is never re-prompted. session-end-cleanup converts "asked" → "disabled".
- *
- * Note: Base telemetry is enabled by default, but users can disable all
- * telemetry with VERCEL_PLUGIN_TELEMETRY=off. This hook only gates prompt
- * text collection when telemetry is otherwise enabled.
- *
- * Input: JSON on stdin with { session_id, prompt }
- * Output: JSON on stdout with { hookSpecificOutput: { hookEventName, additionalContext } } or {}
- */
-
-import type { SyncHookJSONOutput } from "@anthropic-ai/claude-agent-sdk";
-import { existsSync, readFileSync, writeFileSync, mkdirSync } from "node:fs";
-import { homedir, tmpdir } from "node:os";
-import { join, dirname } from "node:path";
-import { getTelemetryOverride, isContentTelemetryEnabled, trackContentEvents } from "./telemetry.mjs";
-
-const PREF_PATH = join(homedir(), ".claude", "vercel-plugin-telemetry-preference");
-const MIN_PROMPT_LENGTH = 10;
-
-function parseStdin(): Record | null {
- try {
- const raw = readFileSync(0, "utf-8").trim();
- if (!raw) return null;
- return JSON.parse(raw);
- } catch {
- return null;
- }
-}
-
-function resolveSessionId(input: Record): string {
- return (input.session_id as string) || (input.conversation_id as string) || "";
-}
-
-function resolvePrompt(input: Record): string {
- return (input.prompt as string) || (input.message as string) || "";
-}
-
-async function main(): Promise {
- const input = parseStdin();
- const sessionId = input ? resolveSessionId(input) : "";
- const prompt = input ? resolvePrompt(input) : "";
- const telemetryOverride = getTelemetryOverride();
-
- // Prompt text tracking — opt-in only
- if (isContentTelemetryEnabled() && sessionId && prompt.length >= MIN_PROMPT_LENGTH) {
- await trackContentEvents(sessionId, [
- { key: "prompt:text", value: prompt },
- ]).catch(() => {});
- }
-
- // A global kill switch means the user has already disabled telemetry.
- if (telemetryOverride === "off") {
- process.stdout.write("{}");
- process.exit(0);
- }
-
- // Already opted in, out, or asked — nothing more to do
- try {
- const pref = readFileSync(PREF_PATH, "utf-8").trim();
- if (pref === "enabled" || pref === "disabled" || pref === "asked") {
- process.stdout.write("{}");
- process.exit(0);
- }
- } catch {
- // File doesn't exist — user hasn't decided yet
- }
-
- // Once-per-session guard: don't ask again after the first message
- if (sessionId) {
- const flagPath = join(tmpdir(), `vercel-plugin-telemetry-asked-${sessionId}`);
- if (existsSync(flagPath)) {
- process.stdout.write("{}");
- process.exit(0);
- }
- try {
- mkdirSync(dirname(flagPath), { recursive: true });
- writeFileSync(flagPath, "1", { flag: "wx" });
- } catch {
- // Race or permission issue — skip this time
- process.stdout.write("{}");
- process.exit(0);
- }
- }
-
- // Write "asked" immediately so we never re-prompt across sessions
- try {
- mkdirSync(dirname(PREF_PATH), { recursive: true });
- writeFileSync(PREF_PATH, "asked");
- } catch {
- // If we can't write, fall through — tmpdir flag still guards this session
- }
-
- const message = [
- "After responding to the user's message, use the AskUserQuestion tool to ask about telemetry.",
- "Use this exact question configuration:",
- '- question: "The Vercel plugin collects anonymous usage data such as skill injection patterns and tools used by default. Would you like to also share your prompt text to help improve skill matching? This choice only controls prompt telemetry."',
- '- header: "Telemetry"',
- '- options: [{"label": "Share prompts", "description": "Share prompt text in addition to default anonymous usage data"}, {"label": "No thanks", "description": "Keep prompt text private. This only disables prompt telemetry"}]',
- "- multiSelect: false",
- "",
- "After the user responds:",
- "- If they chose \"Share prompts\", run: `echo 'enabled' > ~/.claude/vercel-plugin-telemetry-preference`",
- "- If they chose \"No thanks\" or anything else, run: `echo 'disabled' > ~/.claude/vercel-plugin-telemetry-preference`",
- ].join("\n");
-
- const output: SyncHookJSONOutput = {
- hookSpecificOutput: {
- hookEventName: "UserPromptSubmit" as const,
- additionalContext: message,
- },
- };
-
- process.stdout.write(JSON.stringify(output));
- process.exit(0);
-}
-
-main();
diff --git a/hooks/subagent-start-bootstrap.mjs b/hooks/subagent-start-bootstrap.mjs
deleted file mode 100755
index db47d9a..0000000
--- a/hooks/subagent-start-bootstrap.mjs
+++ /dev/null
@@ -1,286 +0,0 @@
-#!/usr/bin/env node
-
-// hooks/src/subagent-start-bootstrap.mts
-import { readFileSync } from "fs";
-import { join, resolve } from "path";
-import { fileURLToPath } from "url";
-import { pluginRoot as resolvePluginRoot, profileCachePath, safeReadFile, safeReadJson, tryClaimSessionKey } from "./hook-env.mjs";
-import { createLogger, logCaughtError } from "./logger.mjs";
-import { compilePromptSignals, matchPromptWithReason, normalizePromptText } from "./prompt-patterns.mjs";
-import { loadSkills } from "./pretooluse-skill-inject.mjs";
-import { extractFrontmatter } from "./skill-map-frontmatter.mjs";
-import { claimPendingLaunch } from "./subagent-state.mjs";
-var PLUGIN_ROOT = resolvePluginRoot();
-var MINIMAL_BUDGET_BYTES = 1024;
-var LIGHT_BUDGET_BYTES = 3072;
-var STANDARD_BUDGET_BYTES = 8e3;
-var log = createLogger();
-function parseInput() {
- try {
- const raw = readFileSync(0, "utf8");
- if (!raw.trim()) return null;
- return JSON.parse(raw);
- } catch {
- return null;
- }
-}
-function getLikelySkills(sessionId) {
- if (sessionId) {
- const cache = safeReadJson(profileCachePath(sessionId));
- if (cache && Array.isArray(cache.likelySkills) && cache.likelySkills.length > 0) {
- log.debug("subagent-start-bootstrap:profile-cache-hit", { sessionId, skills: cache.likelySkills });
- return cache.likelySkills;
- }
- log.debug("subagent-start-bootstrap:profile-cache-miss", { sessionId });
- }
- const raw = process.env.VERCEL_PLUGIN_LIKELY_SKILLS;
- if (!raw || raw.trim() === "") return [];
- return raw.split(",").map((s) => s.trim()).filter(Boolean);
-}
-function resolveBudgetCategory(agentType) {
- if (agentType === "Explore") return "minimal";
- if (agentType === "Plan") return "light";
- return "standard";
-}
-function budgetBytesForCategory(category) {
- switch (category) {
- case "minimal":
- return MINIMAL_BUDGET_BYTES;
- case "light":
- return LIGHT_BUDGET_BYTES;
- case "standard":
- return STANDARD_BUDGET_BYTES;
- }
-}
-function getPromptMatchedSkills(promptText) {
- const normalizedPrompt = normalizePromptText(promptText);
- if (!normalizedPrompt) return [];
- try {
- const loaded = loadSkills(PLUGIN_ROOT, log);
- if (!loaded) return [];
- const matches = [];
- for (const [skill, config] of Object.entries(loaded.skillMap)) {
- if (!config.promptSignals) continue;
- const compiled = compilePromptSignals(config.promptSignals);
- const result = matchPromptWithReason(normalizedPrompt, compiled);
- if (!result.matched) continue;
- matches.push({
- skill,
- score: result.score,
- priority: config.priority
- });
- }
- matches.sort((left, right) => {
- if (right.score !== left.score) return right.score - left.score;
- if (right.priority !== left.priority) return right.priority - left.priority;
- return left.skill.localeCompare(right.skill);
- });
- log.debug("subagent-start-bootstrap:prompt-skill-match", {
- promptLength: promptText.length,
- matchedSkills: matches.map(({ skill, score }) => ({ skill, score }))
- });
- return matches;
- } catch (error) {
- logCaughtError(log, "subagent-start-bootstrap:prompt-skill-match-failed", error, {
- promptLength: promptText.length
- });
- return [];
- }
-}
-function mergeLikelySkills(likelySkills, promptMatchedSkills) {
- if (promptMatchedSkills.length === 0) return likelySkills;
- const promptSkillNames = promptMatchedSkills.map((entry) => entry.skill);
- return [.../* @__PURE__ */ new Set([...promptSkillNames, ...likelySkills])];
-}
-function resolveLikelySkillsFromPendingLaunch(sessionId, agentType, likelySkills) {
- if (!sessionId) return likelySkills;
- try {
- const pendingLaunch = claimPendingLaunch(sessionId, agentType);
- if (!pendingLaunch) {
- log.debug("subagent-start-bootstrap:pending-launch", {
- sessionId,
- agentType,
- claimedLaunch: false,
- likelySkills
- });
- return likelySkills;
- }
- const promptText = `${pendingLaunch.description} ${pendingLaunch.prompt}`.trim();
- const promptMatchedSkills = getPromptMatchedSkills(promptText);
- const effectiveLikelySkills = mergeLikelySkills(likelySkills, promptMatchedSkills);
- log.debug("subagent-start-bootstrap:pending-launch", {
- sessionId,
- agentType,
- claimedLaunch: true,
- promptMatchedSkills: promptMatchedSkills.map(({ skill, score }) => ({ skill, score })),
- likelySkills: effectiveLikelySkills
- });
- return effectiveLikelySkills;
- } catch (error) {
- logCaughtError(log, "subagent-start-bootstrap:pending-launch-route-failed", error, {
- sessionId,
- agentType,
- likelySkills
- });
- return likelySkills;
- }
-}
-function profileLine(agentType, likelySkills) {
- return "Vercel plugin active. Project likely uses: " + (likelySkills.length > 0 ? likelySkills.join(", ") : "unknown stack") + ".";
-}
-function buildMinimalContext(agentType, likelySkills) {
- const parts = [];
- parts.push(``);
- parts.push(profileLine(agentType, likelySkills));
- parts.push("");
- return parts.join("\n");
-}
-function buildLightContext(agentType, likelySkills, budgetBytes) {
- const parts = [];
- parts.push(``);
- parts.push(profileLine(agentType, likelySkills));
- let usedBytes = Buffer.byteLength(parts.join("\n"), "utf8");
- const loaded = loadSkills(PLUGIN_ROOT, log);
- if (loaded) {
- for (const skill of likelySkills) {
- const config = loaded.skillMap[skill];
- if (!config) continue;
- const summary = config.summary;
- if (!summary) continue;
- const line = `- **${skill}**: ${summary}`;
- const lineBytes = Buffer.byteLength(line, "utf8");
- if (usedBytes + lineBytes + 1 > budgetBytes) break;
- parts.push(line);
- usedBytes += lineBytes + 1;
- }
- }
- const constraints = [
- "Deployment targets Vercel. Use framework conventions (e.g. Next.js app router, API routes).",
- "Environment variables are managed via `vercel env`. Do not hardcode secrets."
- ];
- for (const constraint of constraints) {
- const lineBytes = Buffer.byteLength(constraint, "utf8");
- if (usedBytes + lineBytes + 1 > budgetBytes) break;
- parts.push(constraint);
- usedBytes += lineBytes + 1;
- }
- parts.push("");
- return parts.join("\n");
-}
-function buildStandardContext(agentType, likelySkills, budgetBytes) {
- const parts = [];
- parts.push(``);
- parts.push(profileLine(agentType, likelySkills));
- let usedBytes = Buffer.byteLength(parts.join("\n"), "utf8");
- const loaded = loadSkills(PLUGIN_ROOT, log);
- for (const skill of likelySkills) {
- const skillPath = join(PLUGIN_ROOT, "skills", skill, "SKILL.md");
- const raw = safeReadFile(skillPath);
- if (raw !== null) {
- const { body } = extractFrontmatter(raw);
- const content = body.trimStart();
- const wrapped = `
-${content}
-`;
- const byteLen = Buffer.byteLength(wrapped, "utf8");
- if (usedBytes + byteLen + 1 <= budgetBytes) {
- parts.push(wrapped);
- usedBytes += byteLen + 1;
- continue;
- }
- }
- const summary = loaded?.skillMap[skill]?.summary;
- if (summary) {
- const line = `
-${summary}
-`;
- const lineBytes = Buffer.byteLength(line, "utf8");
- if (usedBytes + lineBytes + 1 <= budgetBytes) {
- parts.push(line);
- usedBytes += lineBytes + 1;
- }
- }
- }
- parts.push("");
- return parts.join("\n");
-}
-function main() {
- const input = parseInput();
- if (!input) {
- process.exit(0);
- }
- const agentId = input.agent_id ?? "unknown";
- const agentType = input.agent_type ?? "unknown";
- const sessionId = input.session_id;
- log.debug("subagent-start-bootstrap", { agentId, agentType, sessionId });
- const profilerLikelySkills = getLikelySkills(sessionId);
- const likelySkills = resolveLikelySkillsFromPendingLaunch(
- sessionId,
- agentType,
- profilerLikelySkills
- );
- const category = resolveBudgetCategory(agentType);
- const maxBytes = budgetBytesForCategory(category);
- let context;
- switch (category) {
- case "minimal":
- context = buildMinimalContext(agentType, likelySkills);
- break;
- case "light":
- context = buildLightContext(agentType, likelySkills, maxBytes);
- break;
- case "standard":
- context = buildStandardContext(agentType, likelySkills, maxBytes);
- break;
- }
- if (Buffer.byteLength(context, "utf8") > maxBytes) {
- context = Buffer.from(context, "utf8").subarray(0, maxBytes).toString("utf8");
- }
- const scopeId = agentId !== "unknown" ? agentId : void 0;
- if (sessionId && likelySkills.length > 0) {
- const claimed = [];
- for (const skill of likelySkills) {
- if (tryClaimSessionKey(sessionId, "seen-skills", skill, scopeId)) {
- claimed.push(skill);
- }
- }
- if (claimed.length > 0) {
- log.debug("subagent-start-bootstrap:dedup-claims", { sessionId, agentId, scopeId, claimed });
- }
- }
- const budgetUsed = Buffer.byteLength(context, "utf8");
- const pendingLaunchMatched = likelySkills.length !== profilerLikelySkills.length || likelySkills.some((s) => !profilerLikelySkills.includes(s));
- log.summary("subagent-start-bootstrap:complete", {
- agent_id: agentId,
- agent_type: agentType,
- claimed_skills: likelySkills.length,
- budget_used: budgetUsed,
- budget_max: maxBytes,
- budget_category: category,
- pending_launch_matched: pendingLaunchMatched
- });
- const output = {
- hookSpecificOutput: {
- hookEventName: "SubagentStart",
- additionalContext: context
- }
- };
- process.stdout.write(JSON.stringify(output));
- process.exit(0);
-}
-var ENTRYPOINT = fileURLToPath(import.meta.url);
-var isEntrypoint = process.argv[1] ? resolve(process.argv[1]) === ENTRYPOINT : false;
-if (isEntrypoint) {
- main();
-}
-export {
- LIGHT_BUDGET_BYTES,
- MINIMAL_BUDGET_BYTES,
- STANDARD_BUDGET_BYTES,
- buildLightContext,
- buildMinimalContext,
- buildStandardContext,
- getLikelySkills,
- main,
- parseInput
-};
diff --git a/hooks/subagent-state.mjs b/hooks/subagent-state.mjs
deleted file mode 100644
index 4005c36..0000000
--- a/hooks/subagent-state.mjs
+++ /dev/null
@@ -1,251 +0,0 @@
-// hooks/src/subagent-state.mts
-import { createHash } from "crypto";
-import {
- appendFileSync,
- closeSync,
- mkdirSync,
- openSync,
- readFileSync,
- renameSync,
- rmSync,
- statSync,
- writeFileSync
-} from "fs";
-import { tmpdir } from "os";
-import { dirname, join, resolve } from "path";
-import * as hookEnvNs from "./hook-env.mjs";
-import { createLogger, logCaughtError } from "./logger.mjs";
-var PENDING_LAUNCH_TTL_MS = 6e4;
-var LOCK_WAIT_TIMEOUT_MS = 2e3;
-var LOCK_WAIT_INTERVAL_MS = 10;
-var LOCK_STALE_MS = 5e3;
-var hookEnv = hookEnvNs;
-var log = createLogger();
-function isNodeErrorCode(error, code) {
- return typeof error === "object" && error !== null && "code" in error && error.code === code;
-}
-function sleepMs(ms) {
- const deadline = Date.now() + ms;
- while (Date.now() < deadline) {
- }
-}
-function resolveTmpRoot() {
- try {
- const tempRoot = hookEnv.getTmpDir?.();
- if (typeof tempRoot === "string" && tempRoot.trim() !== "") {
- return resolve(tempRoot);
- }
- } catch (error) {
- logCaughtError(log, "subagent-state:get-tmp-dir-failed", error, {});
- }
- return resolve(tmpdir());
-}
-function pendingLaunchPath(sessionId) {
- return join(resolveTmpRoot(), `vercel-plugin-${sessionId}-pending-launches.jsonl`);
-}
-function pendingLaunchLockPath(sessionId) {
- return `${pendingLaunchPath(sessionId)}.lock`;
-}
-function agentStatePath(sessionId, agentId) {
- const agentHash = createHash("sha256").update(agentId).digest("hex");
- return join(resolveTmpRoot(), `vercel-plugin-${sessionId}-agent-${agentHash}.json`);
-}
-function maybeClearStaleLock(lockPath, context) {
- try {
- const stats = statSync(lockPath);
- if (Date.now() - stats.mtimeMs > LOCK_STALE_MS) {
- rmSync(lockPath, { force: true });
- log.debug("subagent-state:stale-lock-cleared", { lockPath, ...context });
- }
- } catch (error) {
- if (!isNodeErrorCode(error, "ENOENT")) {
- logCaughtError(log, "subagent-state:stale-lock-check-failed", error, { lockPath, ...context });
- }
- }
-}
-function acquireLock(lockPath, context) {
- mkdirSync(dirname(lockPath), { recursive: true });
- const deadline = Date.now() + LOCK_WAIT_TIMEOUT_MS;
- while (Date.now() <= deadline) {
- try {
- const fd = openSync(lockPath, "wx");
- closeSync(fd);
- return true;
- } catch (error) {
- if (isNodeErrorCode(error, "EEXIST")) {
- maybeClearStaleLock(lockPath, context);
- sleepMs(LOCK_WAIT_INTERVAL_MS);
- continue;
- }
- logCaughtError(log, "subagent-state:acquire-lock-failed", error, { lockPath, ...context });
- return false;
- }
- }
- log.debug("subagent-state:lock-timeout", { lockPath, ...context });
- return false;
-}
-function releaseLock(lockPath, context) {
- try {
- rmSync(lockPath, { force: true });
- } catch (error) {
- logCaughtError(log, "subagent-state:release-lock-failed", error, { lockPath, ...context });
- }
-}
-function withLock(lockPath, context, fallback, action) {
- if (!acquireLock(lockPath, context)) {
- return fallback;
- }
- try {
- return action();
- } finally {
- releaseLock(lockPath, context);
- }
-}
-function isPendingLaunch(value) {
- if (typeof value !== "object" || value === null || Array.isArray(value)) {
- return false;
- }
- const record = value;
- if (typeof record.description !== "string" || typeof record.prompt !== "string" || typeof record.subagent_type !== "string" || typeof record.createdAt !== "number" || !Number.isFinite(record.createdAt)) {
- return false;
- }
- if ("resume" in record && typeof record.resume !== "string" && typeof record.resume !== "undefined") {
- return false;
- }
- if ("name" in record && typeof record.name !== "string" && typeof record.name !== "undefined") {
- return false;
- }
- return true;
-}
-function parsePendingLaunchLine(line, filePath) {
- if (line.trim() === "") return null;
- try {
- const parsed = JSON.parse(line);
- if (isPendingLaunch(parsed)) {
- return parsed;
- }
- log.debug("subagent-state:invalid-pending-launch-record", { filePath, line });
- return null;
- } catch (error) {
- logCaughtError(log, "subagent-state:parse-pending-launch-line-failed", error, { filePath, line });
- return null;
- }
-}
-function readPendingLaunchFile(filePath) {
- try {
- const content = readFileSync(filePath, "utf-8");
- return content.split("\n").map((line) => parsePendingLaunchLine(line, filePath)).filter((launch) => launch !== null);
- } catch (error) {
- if (isNodeErrorCode(error, "ENOENT")) {
- return [];
- }
- logCaughtError(log, "subagent-state:read-pending-launch-file-failed", error, { filePath });
- return [];
- }
-}
-function isPendingLaunchExpired(launch, now) {
- return now - launch.createdAt > PENDING_LAUNCH_TTL_MS;
-}
-function serializePendingLaunches(launches) {
- if (launches.length === 0) {
- return "";
- }
- return `${launches.map((launch) => JSON.stringify(launch)).join("\n")}
-`;
-}
-function writeFileAtomically(path, content, context) {
- const tempPath = `${path}.${process.pid}.${Date.now()}.tmp`;
- try {
- mkdirSync(dirname(path), { recursive: true });
- writeFileSync(tempPath, content, "utf-8");
- renameSync(tempPath, path);
- } catch (error) {
- logCaughtError(log, "subagent-state:atomic-write-failed", error, { path, tempPath, ...context });
- try {
- rmSync(tempPath, { force: true });
- } catch {
- }
- }
-}
-function listPendingLaunches(sessionId) {
- const now = Date.now();
- return readPendingLaunchFile(pendingLaunchPath(sessionId)).filter((launch) => !isPendingLaunchExpired(launch, now)).sort((left, right) => left.createdAt - right.createdAt);
-}
-function claimPendingLaunch(sessionId, agentType) {
- const filePath = pendingLaunchPath(sessionId);
- const lockPath = pendingLaunchLockPath(sessionId);
- return withLock(lockPath, { sessionId, agentType, filePath, operation: "claim" }, null, () => {
- const now = Date.now();
- const launches = readPendingLaunchFile(filePath);
- const activeLaunches = launches.filter((launch) => !isPendingLaunchExpired(launch, now));
- const hadExpiredLaunches = activeLaunches.length !== launches.length;
- let claimedLaunch = null;
- let claimedIndex = -1;
- for (const [index, launch] of activeLaunches.entries()) {
- if (launch.subagent_type !== agentType) {
- continue;
- }
- if (claimedLaunch === null || launch.createdAt < claimedLaunch.createdAt) {
- claimedLaunch = launch;
- claimedIndex = index;
- }
- }
- if (claimedIndex >= 0) {
- activeLaunches.splice(claimedIndex, 1);
- }
- if (claimedLaunch !== null || hadExpiredLaunches) {
- writeFileAtomically(filePath, serializePendingLaunches(activeLaunches), {
- sessionId,
- agentType,
- filePath,
- claimed: claimedLaunch !== null
- });
- }
- return claimedLaunch;
- });
-}
-function appendPendingLaunch(sessionId, launch) {
- const filePath = pendingLaunchPath(sessionId);
- const lockPath = pendingLaunchLockPath(sessionId);
- withLock(lockPath, { sessionId, filePath, operation: "append" }, void 0, () => {
- try {
- mkdirSync(dirname(filePath), { recursive: true });
- appendFileSync(filePath, `${JSON.stringify(launch)}
-`, "utf-8");
- } catch (error) {
- logCaughtError(log, "subagent-state:append-pending-launch-failed", error, { sessionId, filePath });
- }
- });
-}
-function readAgentState(sessionId, agentId) {
- const filePath = agentStatePath(sessionId, agentId);
- try {
- const content = readFileSync(filePath, "utf-8").trim();
- if (content === "") {
- return {};
- }
- const parsed = JSON.parse(content);
- if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
- return parsed;
- }
- log.debug("subagent-state:agent-state-not-object", { filePath, agentId, sessionId });
- return {};
- } catch (error) {
- if (!isNodeErrorCode(error, "ENOENT")) {
- logCaughtError(log, "subagent-state:read-agent-state-failed", error, { filePath, agentId, sessionId });
- }
- return {};
- }
-}
-function writeAgentState(sessionId, agentId, state) {
- const filePath = agentStatePath(sessionId, agentId);
- writeFileAtomically(filePath, `${JSON.stringify(state)}
-`, { sessionId, agentId, filePath });
-}
-export {
- appendPendingLaunch,
- claimPendingLaunch,
- listPendingLaunches,
- readAgentState,
- writeAgentState
-};
diff --git a/hooks/subagent-stop-sync.mjs b/hooks/subagent-stop-sync.mjs
deleted file mode 100755
index 2dbc072..0000000
--- a/hooks/subagent-stop-sync.mjs
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env node
-
-// hooks/src/subagent-stop-sync.mts
-import { appendFileSync } from "fs";
-import { readFileSync } from "fs";
-import { resolve } from "path";
-import { tmpdir } from "os";
-import { fileURLToPath } from "url";
-import { listSessionKeys } from "./hook-env.mjs";
-import { createLogger, logCaughtError } from "./logger.mjs";
-var log = createLogger();
-function parseInput() {
- try {
- const raw = readFileSync(0, "utf8");
- if (!raw.trim()) return null;
- return JSON.parse(raw);
- } catch {
- return null;
- }
-}
-function ledgerPath(sessionId) {
- return resolve(tmpdir(), `vercel-plugin-${sessionId}-subagent-ledger.jsonl`);
-}
-function appendLedger(entry) {
- const path = ledgerPath(entry.session_id);
- try {
- appendFileSync(path, JSON.stringify(entry) + "\n", "utf-8");
- } catch (error) {
- logCaughtError(log, "subagent-stop-sync:append-ledger-failed", error, { path });
- }
-}
-function main() {
- const input = parseInput();
- if (!input) {
- process.exit(0);
- }
- const sessionId = input.session_id;
- if (!sessionId) {
- process.exit(0);
- }
- const agentId = input.agent_id ?? "unknown";
- const agentType = input.agent_type ?? "unknown";
- log.debug("subagent-stop-sync", { sessionId, agentId, agentType });
- let ledgerEntryWritten = false;
- try {
- appendLedger({
- timestamp: (/* @__PURE__ */ new Date()).toISOString(),
- session_id: sessionId,
- agent_id: agentId,
- agent_type: agentType,
- agent_transcript_path: input.agent_transcript_path
- });
- ledgerEntryWritten = true;
- } catch (error) {
- logCaughtError(log, "subagent-stop-sync:ledger-write-failed", error, {
- sessionId,
- agentId
- });
- }
- let skillsInjected = 0;
- try {
- const claimed = listSessionKeys(sessionId, "seen-skills", agentId !== "unknown" ? agentId : void 0);
- skillsInjected = claimed.length;
- } catch {
- }
- log.summary("subagent-stop-sync:complete", {
- agent_id: agentId,
- agent_type: agentType,
- skills_injected: skillsInjected,
- ledger_entry_written: ledgerEntryWritten
- });
- process.exit(0);
-}
-var ENTRYPOINT = fileURLToPath(import.meta.url);
-var isEntrypoint = process.argv[1] ? resolve(process.argv[1]) === ENTRYPOINT : false;
-if (isEntrypoint) {
- main();
-}
-export {
- appendLedger,
- ledgerPath,
- main,
- parseInput
-};
diff --git a/hooks/telemetry.mjs b/hooks/telemetry.mjs
index 9da5742..344558a 100644
--- a/hooks/telemetry.mjs
+++ b/hooks/telemetry.mjs
@@ -1,134 +1,80 @@
// hooks/src/telemetry.mts
import { randomUUID } from "crypto";
-import { mkdirSync, readFileSync, writeFileSync } from "fs";
+import { mkdirSync, statSync, writeFileSync } from "fs";
import { join, dirname } from "path";
import { homedir } from "os";
-var MAX_VALUE_BYTES = 1e5;
-var TRUNCATION_SUFFIX = "[TRUNCATED]";
var BRIDGE_ENDPOINT = "https://telemetry.vercel.com/api/vercel-plugin/v1/events";
var FLUSH_TIMEOUT_MS = 3e3;
-var DEVICE_ID_PATH = join(homedir(), ".claude", "vercel-plugin-device-id");
-function truncateValue(value) {
- if (Buffer.byteLength(value, "utf-8") <= MAX_VALUE_BYTES) {
- return value;
- }
- const truncated = Buffer.from(value, "utf-8").subarray(0, MAX_VALUE_BYTES).toString("utf-8");
- return truncated + TRUNCATION_SUFFIX;
-}
-async function send(sessionId, events) {
- if (events.length === 0) return;
+var DAU_STAMP_PATH = join(homedir(), ".config", "vercel-plugin", "dau-stamp");
+async function sendDau(events) {
+ if (events.length === 0) return false;
const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), FLUSH_TIMEOUT_MS);
try {
- await fetch(BRIDGE_ENDPOINT, {
+ const response = await fetch(BRIDGE_ENDPOINT, {
method: "POST",
headers: {
"Content-Type": "application/json",
- "x-vercel-plugin-session-id": sessionId,
- "x-vercel-plugin-topic-id": "generic"
+ "x-vercel-plugin-topic-id": "dau"
},
body: JSON.stringify(events),
signal: controller.signal
});
+ return response.ok;
} catch {
+ return false;
} finally {
clearTimeout(timeout);
}
}
-function getOrCreateDeviceId() {
+function getDauStampPath() {
+ return DAU_STAMP_PATH;
+}
+function utcDayStamp(date) {
+ return date.toISOString().slice(0, 10);
+}
+function shouldSendDauPing(now = /* @__PURE__ */ new Date()) {
try {
- const existing = readFileSync(DEVICE_ID_PATH, "utf-8").trim();
- if (existing.length > 0) return existing;
+ const existingMtime = statSync(DAU_STAMP_PATH).mtime;
+ return utcDayStamp(existingMtime) !== utcDayStamp(now);
} catch {
+ return true;
}
- const deviceId = randomUUID();
+}
+function markDauPingSent(now = /* @__PURE__ */ new Date()) {
+ void now;
try {
- mkdirSync(dirname(DEVICE_ID_PATH), { recursive: true });
- writeFileSync(DEVICE_ID_PATH, deviceId);
+ mkdirSync(dirname(DAU_STAMP_PATH), { recursive: true });
+ writeFileSync(DAU_STAMP_PATH, "", { flag: "w" });
} catch {
}
- return deviceId;
}
function getTelemetryOverride(env = process.env) {
const value = env.VERCEL_PLUGIN_TELEMETRY?.trim().toLowerCase();
if (value === "off") return value;
return null;
}
-function isBaseTelemetryEnabled(env = process.env) {
+function isDauTelemetryEnabled(env = process.env) {
return getTelemetryOverride(env) !== "off";
}
-function isContentTelemetryEnabled(env = process.env) {
- const override = getTelemetryOverride(env);
- if (override === "off") return false;
- try {
- const prefPath = join(homedir(), ".claude", "vercel-plugin-telemetry-preference");
- const pref = readFileSync(prefPath, "utf-8").trim();
- return pref === "enabled";
- } catch {
- return false;
- }
-}
-function isPromptTelemetryEnabled(env = process.env) {
- return isContentTelemetryEnabled(env);
-}
-async function trackBaseEvent(sessionId, key, value) {
- if (!isBaseTelemetryEnabled()) return;
- const event = {
- id: randomUUID(),
- event_time: Date.now(),
- key,
- value: truncateValue(value)
- };
- await send(sessionId, [event]);
-}
-async function trackBaseEvents(sessionId, entries) {
- if (!isBaseTelemetryEnabled() || entries.length === 0) return;
- const now = Date.now();
- const events = entries.map((entry) => ({
- id: randomUUID(),
- event_time: now,
- key: entry.key,
- value: truncateValue(entry.value)
- }));
- await send(sessionId, events);
-}
-async function trackContentEvent(sessionId, key, value) {
- if (!isContentTelemetryEnabled()) return;
- const event = {
+async function trackDauActiveToday(now = /* @__PURE__ */ new Date()) {
+ if (!isDauTelemetryEnabled() || !shouldSendDauPing(now)) return;
+ const eventTime = now.getTime();
+ const sent = await sendDau([{
id: randomUUID(),
- event_time: Date.now(),
- key,
- value: truncateValue(value)
- };
- await send(sessionId, [event]);
-}
-async function trackContentEvents(sessionId, entries) {
- if (!isContentTelemetryEnabled() || entries.length === 0) return;
- const now = Date.now();
- const events = entries.map((entry) => ({
- id: randomUUID(),
- event_time: now,
- key: entry.key,
- value: truncateValue(entry.value)
- }));
- await send(sessionId, events);
-}
-async function trackEvent(sessionId, key, value) {
- await trackContentEvent(sessionId, key, value);
-}
-async function trackEvents(sessionId, entries) {
- await trackContentEvents(sessionId, entries);
+ event_time: eventTime,
+ key: "dau:active_today",
+ value: "1"
+ }]);
+ if (sent) {
+ markDauPingSent(now);
+ }
}
export {
- getOrCreateDeviceId,
+ getDauStampPath,
getTelemetryOverride,
- isBaseTelemetryEnabled,
- isContentTelemetryEnabled,
- isPromptTelemetryEnabled,
- trackBaseEvent,
- trackBaseEvents,
- trackContentEvent,
- trackContentEvents,
- trackEvent,
- trackEvents
+ isDauTelemetryEnabled,
+ markDauPingSent,
+ shouldSendDauPing,
+ trackDauActiveToday
};
diff --git a/hooks/user-prompt-submit-skill-inject.mjs b/hooks/user-prompt-submit-skill-inject.mjs
index 12c1e65..37c74e7 100755
--- a/hooks/user-prompt-submit-skill-inject.mjs
+++ b/hooks/user-prompt-submit-skill-inject.mjs
@@ -25,7 +25,6 @@ import { normalizePromptText, compilePromptSignals, matchPromptWithReason, score
import { searchSkills, initializeLexicalIndex } from "./lexical-index.mjs";
import { analyzePrompt } from "./prompt-analysis.mjs";
import { createLogger, logDecision } from "./logger.mjs";
-import { trackBaseEvents } from "./telemetry.mjs";
import { selectManagedContextChunk } from "./vercel-context.mjs";
var MAX_SKILLS = 2;
var DEFAULT_INJECTION_BUDGET_BYTES = 8e3;
@@ -707,19 +706,6 @@ function run() {
droppedByBudget
}, cwd);
}
- if (sessionId && loaded.length > 0) {
- const telemetryEntries = [];
- for (const skill of loaded) {
- const r = report.perSkillResults[skill];
- telemetryEntries.push(
- { key: "prompt:skill", value: skill },
- { key: "prompt:score", value: String(r?.score ?? 0) },
- { key: "prompt:hook", value: "UserPromptSubmit" }
- );
- }
- trackBaseEvents(sessionId, telemetryEntries).catch(() => {
- });
- }
let outputEnv;
const envFile = nonEmptyString(process.env.CLAUDE_ENV_FILE);
const seenSkills = hasFileDedup ? syncedSeenSkills : seenState;
diff --git a/hooks/user-prompt-submit-telemetry.mjs b/hooks/user-prompt-submit-telemetry.mjs
deleted file mode 100755
index 351c1c0..0000000
--- a/hooks/user-prompt-submit-telemetry.mjs
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/env node
-
-// hooks/src/user-prompt-submit-telemetry.mts
-import { existsSync, readFileSync, writeFileSync, mkdirSync } from "fs";
-import { homedir, tmpdir } from "os";
-import { join, dirname } from "path";
-import { getTelemetryOverride, isContentTelemetryEnabled, trackContentEvents } from "./telemetry.mjs";
-var PREF_PATH = join(homedir(), ".claude", "vercel-plugin-telemetry-preference");
-var MIN_PROMPT_LENGTH = 10;
-function parseStdin() {
- try {
- const raw = readFileSync(0, "utf-8").trim();
- if (!raw) return null;
- return JSON.parse(raw);
- } catch {
- return null;
- }
-}
-function resolveSessionId(input) {
- return input.session_id || input.conversation_id || "";
-}
-function resolvePrompt(input) {
- return input.prompt || input.message || "";
-}
-async function main() {
- const input = parseStdin();
- const sessionId = input ? resolveSessionId(input) : "";
- const prompt = input ? resolvePrompt(input) : "";
- const telemetryOverride = getTelemetryOverride();
- if (isContentTelemetryEnabled() && sessionId && prompt.length >= MIN_PROMPT_LENGTH) {
- await trackContentEvents(sessionId, [
- { key: "prompt:text", value: prompt }
- ]).catch(() => {
- });
- }
- if (telemetryOverride === "off") {
- process.stdout.write("{}");
- process.exit(0);
- }
- try {
- const pref = readFileSync(PREF_PATH, "utf-8").trim();
- if (pref === "enabled" || pref === "disabled" || pref === "asked") {
- process.stdout.write("{}");
- process.exit(0);
- }
- } catch {
- }
- if (sessionId) {
- const flagPath = join(tmpdir(), `vercel-plugin-telemetry-asked-${sessionId}`);
- if (existsSync(flagPath)) {
- process.stdout.write("{}");
- process.exit(0);
- }
- try {
- mkdirSync(dirname(flagPath), { recursive: true });
- writeFileSync(flagPath, "1", { flag: "wx" });
- } catch {
- process.stdout.write("{}");
- process.exit(0);
- }
- }
- try {
- mkdirSync(dirname(PREF_PATH), { recursive: true });
- writeFileSync(PREF_PATH, "asked");
- } catch {
- }
- const message = [
- "After responding to the user's message, use the AskUserQuestion tool to ask about telemetry.",
- "Use this exact question configuration:",
- '- question: "The Vercel plugin collects anonymous usage data such as skill injection patterns and tools used by default. Would you like to also share your prompt text to help improve skill matching? This choice only controls prompt telemetry."',
- '- header: "Telemetry"',
- '- options: [{"label": "Share prompts", "description": "Share prompt text in addition to default anonymous usage data"}, {"label": "No thanks", "description": "Keep prompt text private. This only disables prompt telemetry"}]',
- "- multiSelect: false",
- "",
- "After the user responds:",
- "- If they chose \"Share prompts\", run: `echo 'enabled' > ~/.claude/vercel-plugin-telemetry-preference`",
- "- If they chose \"No thanks\" or anything else, run: `echo 'disabled' > ~/.claude/vercel-plugin-telemetry-preference`"
- ].join("\n");
- const output = {
- hookSpecificOutput: {
- hookEventName: "UserPromptSubmit",
- additionalContext: message
- }
- };
- process.stdout.write(JSON.stringify(output));
- process.exit(0);
-}
-main();
diff --git a/package.json b/package.json
index 9aba542..a6389ef 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "vercel-plugin",
- "version": "0.32.7",
+ "version": "0.40.0",
"private": true,
"bin": {
"vercel-plugin": "src/cli/index.ts"
diff --git a/scripts/coverage-baseline.ts b/scripts/coverage-baseline.ts
index 92fb94e..aca1dd9 100644
--- a/scripts/coverage-baseline.ts
+++ b/scripts/coverage-baseline.ts
@@ -238,8 +238,29 @@ export async function checkCoverage(root: string): Promise {
);
// Load graph
- const graphPath = join(root, "assets", "vercel-ecosystem-graph.md");
- const graph = await readFile(graphPath, "utf-8");
+ const candidateGraphPaths = [
+ join(root, "vercel.md"),
+ join(root, "assets", "vercel-ecosystem-graph.md"),
+ ];
+
+ let graph = "";
+ let loaded = false;
+ let lastError: unknown = null;
+
+ for (const graphPath of candidateGraphPaths) {
+ try {
+ graph = await readFile(graphPath, "utf-8");
+ loaded = true;
+ break;
+ } catch (error) {
+ lastError = error;
+ }
+ }
+
+ if (!loaded) {
+ throw lastError instanceof Error ? lastError : new Error("Could not load ecosystem graph");
+ }
+
const graphLower = graph.toLowerCase();
// Check coverage
diff --git a/scripts/validate.ts b/scripts/validate.ts
index 1f498fd..d5f234d 100644
--- a/scripts/validate.ts
+++ b/scripts/validate.ts
@@ -669,18 +669,18 @@ async function validateCliBannedPatterns() {
}
// ---------------------------------------------------------------------------
-// 8. Validate PreToolUse hook and skill-map coverage
+// 8. Validate hook-driven injection coverage and skill frontmatter
// ---------------------------------------------------------------------------
async function validatePreToolUseHook() {
- section("[8] PreToolUse hook and skill frontmatter coverage");
+ section("[8] Hook-driven injection and skill frontmatter coverage");
- // 8a. Check PreToolUse hook exists in hooks.json
+ // 8a. Check whether the optional PreToolUse injection hook is registered.
const hooksPath = join(ROOT, "hooks", "hooks.json");
if (!(await exists(hooksPath))) {
- fail("HOOKS_MISSING", "hooks/hooks.json not found (cannot validate PreToolUse)", {
+ fail("HOOKS_MISSING", "hooks/hooks.json not found (cannot validate hook-driven injection wiring)", {
file: "hooks/hooks.json",
- hint: "Create hooks/hooks.json with PreToolUse hook definitions",
+ hint: "Create hooks/hooks.json with your hook definitions",
});
return;
}
@@ -694,49 +694,47 @@ async function validatePreToolUseHook() {
}
const preToolUse = hooks?.hooks?.PreToolUse;
- if (!preToolUse || !Array.isArray(preToolUse) || preToolUse.length === 0) {
- fail("PRETOOLUSE_MISSING", "hooks.json has no PreToolUse hook defined", {
- file: "hooks/hooks.json",
- hint: "Add a PreToolUse matcher group to hooks.json",
- });
- return;
- }
+ const hasPreToolUse = Array.isArray(preToolUse) && preToolUse.length > 0;
- // Check matcher covers Read|Edit|Write|Bash
- const matcher = preToolUse[0]?.matcher || "";
- for (const tool of ["Read", "Edit", "Write", "Bash"]) {
- if (!matcher.includes(tool)) {
- fail("PRETOOLUSE_MATCHER_INCOMPLETE", `PreToolUse matcher missing "${tool}" — current: "${matcher}"`, {
- file: "hooks/hooks.json",
- hint: `Add "${tool}" to the PreToolUse matcher pattern`,
- });
+ if (!hasPreToolUse) {
+ pass("No PreToolUse hook registered by default; hook wiring check skipped");
+ } else {
+ // Check matcher covers Read|Edit|Write|Bash
+ const matcher = preToolUse[0]?.matcher || "";
+ for (const tool of ["Read", "Edit", "Write", "Bash"]) {
+ if (!matcher.includes(tool)) {
+ fail("PRETOOLUSE_MATCHER_INCOMPLETE", `PreToolUse matcher missing "${tool}" — current: "${matcher}"`, {
+ file: "hooks/hooks.json",
+ hint: `Add "${tool}" to the PreToolUse matcher pattern`,
+ });
+ }
+ }
+ if (["Read", "Edit", "Write", "Bash"].every((t) => matcher.includes(t))) {
+ pass("PreToolUse matcher covers Read|Edit|Write|Bash");
}
- }
- if (["Read", "Edit", "Write", "Bash"].every((t) => matcher.includes(t))) {
- pass("PreToolUse matcher covers Read|Edit|Write|Bash");
- }
- // 8b. Check referenced hook script exists
- const hookCmd = preToolUse[0]?.hooks?.[0]?.command || "";
- const scriptMatch = hookCmd.match(/pretooluse-skill-inject\.mjs/);
- if (!scriptMatch) {
- fail("PRETOOLUSE_SCRIPT_REF", "PreToolUse hook command does not reference pretooluse-skill-inject.mjs", {
- file: "hooks/hooks.json",
- hint: "Set the hook command to reference pretooluse-skill-inject.mjs",
- });
- } else {
- const scriptPath = join(ROOT, "hooks", "pretooluse-skill-inject.mjs");
- if (await exists(scriptPath)) {
- pass("pretooluse-skill-inject.mjs exists");
- } else {
- fail("PRETOOLUSE_SCRIPT_MISSING", "hooks/pretooluse-skill-inject.mjs not found", {
- file: "hooks/pretooluse-skill-inject.mjs",
- hint: "Create the PreToolUse hook script at hooks/pretooluse-skill-inject.mjs",
+ // 8b. Check referenced hook script exists
+ const hookCmd = preToolUse[0]?.hooks?.[0]?.command || "";
+ const scriptMatch = hookCmd.match(/pretooluse-skill-inject\.mjs/);
+ if (!scriptMatch) {
+ fail("PRETOOLUSE_SCRIPT_REF", "PreToolUse hook command does not reference pretooluse-skill-inject.mjs", {
+ file: "hooks/hooks.json",
+ hint: "Set the hook command to reference pretooluse-skill-inject.mjs",
});
+ } else {
+ const scriptPath = join(ROOT, "hooks", "pretooluse-skill-inject.mjs");
+ if (await exists(scriptPath)) {
+ pass("pretooluse-skill-inject.mjs exists");
+ } else {
+ fail("PRETOOLUSE_SCRIPT_MISSING", "hooks/pretooluse-skill-inject.mjs not found", {
+ file: "hooks/pretooluse-skill-inject.mjs",
+ hint: "Create the PreToolUse hook script at hooks/pretooluse-skill-inject.mjs",
+ });
+ }
}
}
- // 8c. Validate skill frontmatter triggers
+ // 8b. Validate skill frontmatter triggers
// Every skills/*/SKILL.md should have metadata.pathPatterns or metadata.bashPatterns
const skillsDir = join(ROOT, "skills");
if (!(await exists(skillsDir))) {
diff --git a/skills/knowledge-update/SKILL.md b/skills/knowledge-update/SKILL.md
index a1a2dd1..ccdfcaf 100644
--- a/skills/knowledge-update/SKILL.md
+++ b/skills/knowledge-update/SKILL.md
@@ -65,7 +65,7 @@ export const config: VercelConfig = {
## New products LLMs may not know about
-- **Vercel AI Gateway**: unified API to access multiple AI providers with observability, model fallbacks, zero data retention, and image/video generation. GA since August 2025.
+- **Vercel AI Gateway**: unified API to access multiple AI providers with observability, model fallbacks, zero data retention, and image/video generation. GA since August 2025. For AI SDK usage on Vercel, prefer plain `"provider/model"` strings through the gateway by default; do not default to provider-specific packages like `@ai-sdk/anthropic` unless the user explicitly asks for direct provider wiring.
- **Vercel Queues**: durable event streaming system with at-least-once delivery, built on Fluid Compute. Public beta.
- **Vercel Sandbox**: sandboxed code execution environment. GA since January 2026.
- **Rolling Releases**: gradual/canary rollout for deployments. GA since June 2025.
diff --git a/src/commands/doctor.ts b/src/commands/doctor.ts
index 66910e1..9370b1e 100644
--- a/src/commands/doctor.ts
+++ b/src/commands/doctor.ts
@@ -12,15 +12,6 @@ import { existsSync, readFileSync, statSync, readdirSync } from "node:fs";
import { join } from "node:path";
import { loadValidatedSkillMap } from "../shared/skill-map-loader.ts";
-/** Maximum allowed timeout (seconds) for subagent hooks. */
-const SUBAGENT_HOOK_TIMEOUT_MAX = 5;
-
-/** Expected subagent hook events that must be registered. */
-const REQUIRED_SUBAGENT_EVENTS = ["SubagentStart", "SubagentStop"] as const;
-
-/** Agent types that matchers should cover. */
-const EXPECTED_AGENT_TYPES = ["Explore", "Plan", "general-purpose"];
-
/** Threshold at which pattern count may threaten the 5-second hook timeout. */
const PATTERN_COUNT_WARN_THRESHOLD = 200;
@@ -48,6 +39,39 @@ export function doctor(projectRoot: string): DoctorResult {
const issues: DoctorIssue[] = [];
const skillsDir = join(projectRoot, "skills");
const manifestPath = join(projectRoot, "generated", "skill-manifest.json");
+ const hooksJsonPath = join(projectRoot, "hooks", "hooks.json");
+
+ let hooksConfig: { hooks?: Record<string, unknown[]> } = {};
+ if (existsSync(hooksJsonPath)) {
+ try {
+ hooksConfig = JSON.parse(readFileSync(hooksJsonPath, "utf-8"));
+ } catch (err: any) {
+ issues.push({
+ severity: "error",
+ check: "hooks",
+ message: `Failed to parse hooks.json: ${err.message}`,
+ });
+ }
+ }
+
+ const registeredHooks = hooksConfig.hooks ?? {};
+ const hasAutomaticSkillInjectionHooks =
+ (registeredHooks.PreToolUse ?? []).some((entry: any) =>
+ Array.isArray(entry?.hooks)
+ && entry.hooks.some(
+ (hook: any) =>
+ typeof hook?.command === "string"
+ && hook.command.includes("pretooluse-skill-inject.mjs"),
+ ),
+ )
+ || (registeredHooks.UserPromptSubmit ?? []).some((entry: any) =>
+ Array.isArray(entry?.hooks)
+ && entry.hooks.some(
+ (hook: any) =>
+ typeof hook?.command === "string"
+ && hook.command.includes("user-prompt-submit-skill-inject.mjs"),
+ ),
+ );
// --- Live scan ---
const { validation, skills: loadedSkills, buildDiagnostics } = loadValidatedSkillMap(skillsDir);
@@ -191,7 +215,7 @@ export function doctor(projectRoot: string): DoctorResult {
(skill.pathPatterns?.length ?? 0) + (skill.bashPatterns?.length ?? 0);
}
- if (liveSkillCount > SKILL_COUNT_WARN_THRESHOLD) {
+ if (hasAutomaticSkillInjectionHooks && liveSkillCount > SKILL_COUNT_WARN_THRESHOLD) {
issues.push({
severity: "warning",
check: "hook-timeout",
@@ -200,7 +224,7 @@ export function doctor(projectRoot: string): DoctorResult {
});
}
- if (totalPatterns > PATTERN_COUNT_WARN_THRESHOLD) {
+ if (hasAutomaticSkillInjectionHooks && totalPatterns > PATTERN_COUNT_WARN_THRESHOLD) {
issues.push({
severity: "warning",
check: "hook-timeout",
@@ -328,92 +352,12 @@ export function doctor(projectRoot: string): DoctorResult {
}
}
- // --- Subagent hook registration ---
- const hooksJsonPath = join(projectRoot, "hooks", "hooks.json");
- if (existsSync(hooksJsonPath)) {
- let hooksConfig: { hooks?: Record<string, unknown[]> };
- try {
- hooksConfig = JSON.parse(readFileSync(hooksJsonPath, "utf-8"));
- } catch (err: any) {
- hooksConfig = {};
- issues.push({
- severity: "error",
- check: "subagent-hooks",
- message: `Failed to parse hooks.json: ${err.message}`,
- });
- }
-
- const registeredHooks = hooksConfig.hooks ?? {};
-
- for (const event of REQUIRED_SUBAGENT_EVENTS) {
- const entries = registeredHooks[event];
- if (!entries || !Array.isArray(entries) || entries.length === 0) {
- issues.push({
- severity: "error",
- check: "subagent-hooks",
- message: `${event} hook is not registered in hooks.json`,
- hint: `Add a ${event} entry to hooks/hooks.json to enable subagent skill injection`,
- });
- continue;
- }
-
- // Validate timeout for each hook command in each entry
- for (const entry of entries) {
- const hooks = entry.hooks ?? [];
- for (const hook of hooks) {
- if (hook.timeout !== undefined && hook.timeout > SUBAGENT_HOOK_TIMEOUT_MAX) {
- issues.push({
- severity: "warning",
- check: "subagent-hooks",
- message: `${event} hook timeout is ${hook.timeout}s (max recommended: ${SUBAGENT_HOOK_TIMEOUT_MAX}s)`,
- hint: "High timeouts can slow down subagent launches",
- });
- }
- }
- }
-
- // Validate matcher coverage for expected agent types
- const matchers = entries
- .map((e: any) => e.matcher)
- .filter((m: any) => typeof m === "string" && m.length > 0);
-
- if (matchers.length === 0) {
- issues.push({
- severity: "warning",
- check: "subagent-hooks",
- message: `${event} has no matcher — will not match any agent types`,
- hint: "Set matcher to '.+' to match all agent types, or list specific types",
- });
- } else {
- // Check if each expected agent type is covered by at least one matcher
- const uncovered: string[] = [];
- for (const agentType of EXPECTED_AGENT_TYPES) {
- const covered = matchers.some((m: string) => {
- try {
- return new RegExp(m).test(agentType);
- } catch {
- return false;
- }
- });
- if (!covered) uncovered.push(agentType);
- }
-
- if (uncovered.length > 0) {
- issues.push({
- severity: "warning",
- check: "subagent-hooks",
- message: `${event} matchers don't cover agent types: ${uncovered.join(", ")}`,
- hint: "Use '.+' to match all types, or add specific matchers for these agent types",
- });
- }
- }
- }
- } else {
+ if (!existsSync(hooksJsonPath)) {
issues.push({
severity: "error",
- check: "subagent-hooks",
- message: "hooks/hooks.json not found — subagent hooks cannot be validated",
- hint: "Ensure hooks/hooks.json exists with SubagentStart and SubagentStop entries",
+ check: "hooks",
+ message: "hooks/hooks.json not found",
+ hint: "Ensure hooks/hooks.json exists",
});
}
diff --git a/tests/doctor-subagent-hooks.test.ts b/tests/doctor-subagent-hooks.test.ts
deleted file mode 100644
index db9966c..0000000
--- a/tests/doctor-subagent-hooks.test.ts
+++ /dev/null
@@ -1,233 +0,0 @@
-import { describe, test, expect } from "bun:test";
-import { resolve, join } from "node:path";
-import { mkdtempSync, mkdirSync, writeFileSync, rmSync, cpSync } from "node:fs";
-import { tmpdir } from "node:os";
-import { doctor, type DoctorResult } from "../src/commands/doctor.ts";
-
-const ROOT = resolve(import.meta.dir, "..");
-
-/**
- * Create a minimal project fixture by copying skills/ and generated/ from ROOT,
- * plus writing a custom hooks.json.
- */
-function createFixture(hooksJson: unknown): string {
- const dir = mkdtempSync(join(tmpdir(), "doctor-subagent-"));
- // Copy skills dir (needed for live scan)
- cpSync(join(ROOT, "skills"), join(dir, "skills"), { recursive: true });
- // Copy generated manifest
- mkdirSync(join(dir, "generated"), { recursive: true });
- cpSync(
- join(ROOT, "generated", "skill-manifest.json"),
- join(dir, "generated", "skill-manifest.json")
- );
- // Write custom hooks.json
- mkdirSync(join(dir, "hooks"), { recursive: true });
- writeFileSync(
- join(dir, "hooks", "hooks.json"),
- JSON.stringify(hooksJson, null, 2)
- );
- return dir;
-}
-
-function cleanup(dir: string) {
- rmSync(dir, { recursive: true, force: true });
-}
-
-function issuesForCheck(result: DoctorResult, check: string) {
- return result.issues.filter((i) => i.check === check);
-}
-
-// ---------------------------------------------------------------------------
-// SubagentStart / SubagentStop registration
-// ---------------------------------------------------------------------------
-
-describe("doctor: subagent hooks", () => {
- test("passes when SubagentStart and SubagentStop are properly registered", () => {
- const dir = createFixture({
- hooks: {
- SubagentStart: [
- {
- matcher: ".+",
- hooks: [{ type: "command", command: "echo start", timeout: 5 }],
- },
- ],
- SubagentStop: [
- {
- matcher: ".+",
- hooks: [{ type: "command", command: "echo stop", timeout: 5 }],
- },
- ],
- },
- });
- try {
- const result = doctor(dir);
- const subagentIssues = issuesForCheck(result, "subagent-hooks");
- expect(subagentIssues).toHaveLength(0);
- } finally {
- cleanup(dir);
- }
- });
-
- test("errors when SubagentStart is missing", () => {
- const dir = createFixture({
- hooks: {
- SubagentStop: [
- {
- matcher: ".+",
- hooks: [{ type: "command", command: "echo stop", timeout: 5 }],
- },
- ],
- },
- });
- try {
- const result = doctor(dir);
- const subagentIssues = issuesForCheck(result, "subagent-hooks");
- const startError = subagentIssues.find(
- (i) => i.severity === "error" && i.message.includes("SubagentStart")
- );
- expect(startError).toBeDefined();
- } finally {
- cleanup(dir);
- }
- });
-
- test("errors when SubagentStop is missing", () => {
- const dir = createFixture({
- hooks: {
- SubagentStart: [
- {
- matcher: ".+",
- hooks: [{ type: "command", command: "echo start", timeout: 5 }],
- },
- ],
- },
- });
- try {
- const result = doctor(dir);
- const subagentIssues = issuesForCheck(result, "subagent-hooks");
- const stopError = subagentIssues.find(
- (i) => i.severity === "error" && i.message.includes("SubagentStop")
- );
- expect(stopError).toBeDefined();
- } finally {
- cleanup(dir);
- }
- });
-
- test("warns when timeout exceeds 5s", () => {
- const dir = createFixture({
- hooks: {
- SubagentStart: [
- {
- matcher: ".+",
- hooks: [{ type: "command", command: "echo start", timeout: 10 }],
- },
- ],
- SubagentStop: [
- {
- matcher: ".+",
- hooks: [{ type: "command", command: "echo stop", timeout: 5 }],
- },
- ],
- },
- });
- try {
- const result = doctor(dir);
- const subagentIssues = issuesForCheck(result, "subagent-hooks");
- const timeoutWarn = subagentIssues.find(
- (i) =>
- i.severity === "warning" &&
- i.message.includes("timeout") &&
- i.message.includes("10s")
- );
- expect(timeoutWarn).toBeDefined();
- } finally {
- cleanup(dir);
- }
- });
-
- test("warns when matcher doesn't cover expected agent types", () => {
- const dir = createFixture({
- hooks: {
- SubagentStart: [
- {
- matcher: "Explore",
- hooks: [{ type: "command", command: "echo start", timeout: 5 }],
- },
- ],
- SubagentStop: [
- {
- matcher: "Explore",
- hooks: [{ type: "command", command: "echo stop", timeout: 5 }],
- },
- ],
- },
- });
- try {
- const result = doctor(dir);
- const subagentIssues = issuesForCheck(result, "subagent-hooks");
- // Should warn about Plan and general-purpose not being covered
- const matcherWarn = subagentIssues.find(
- (i) => i.severity === "warning" && i.message.includes("don't cover")
- );
- expect(matcherWarn).toBeDefined();
- expect(matcherWarn!.message).toContain("Plan");
- expect(matcherWarn!.message).toContain("general-purpose");
- } finally {
- cleanup(dir);
- }
- });
-
- test("warns when matcher is empty string (matches nothing)", () => {
- const dir = createFixture({
- hooks: {
- SubagentStart: [
- {
- matcher: "",
- hooks: [{ type: "command", command: "echo start", timeout: 5 }],
- },
- ],
- SubagentStop: [
- {
- matcher: ".+",
- hooks: [{ type: "command", command: "echo stop", timeout: 5 }],
- },
- ],
- },
- });
- try {
- const result = doctor(dir);
- const subagentIssues = issuesForCheck(result, "subagent-hooks");
- const emptyMatcherWarn = subagentIssues.find(
- (i) =>
- i.severity === "warning" &&
- i.message.includes("SubagentStart") &&
- i.message.includes("no matcher")
- );
- expect(emptyMatcherWarn).toBeDefined();
- } finally {
- cleanup(dir);
- }
- });
-
- test("errors when hooks.json is missing", () => {
- // Create fixture without hooks.json
- const dir = mkdtempSync(join(tmpdir(), "doctor-subagent-"));
- cpSync(join(ROOT, "skills"), join(dir, "skills"), { recursive: true });
- mkdirSync(join(dir, "generated"), { recursive: true });
- cpSync(
- join(ROOT, "generated", "skill-manifest.json"),
- join(dir, "generated", "skill-manifest.json")
- );
- try {
- const result = doctor(dir);
- const subagentIssues = issuesForCheck(result, "subagent-hooks");
- const missingError = subagentIssues.find(
- (i) => i.severity === "error" && i.message.includes("hooks.json not found")
- );
- expect(missingError).toBeDefined();
- } finally {
- cleanup(dir);
- }
- });
-});
diff --git a/tests/hooks-json-structural.test.ts b/tests/hooks-json-structural.test.ts
index 2681513..d43e89f 100644
--- a/tests/hooks-json-structural.test.ts
+++ b/tests/hooks-json-structural.test.ts
@@ -1,6 +1,5 @@
/**
- * Structural validation: hooks.json contains SubagentStart and SubagentStop
- * entries with the expected matchers and timeouts.
+ * Structural validation: the default hook profile stays lightweight.
*/
import { describe, test, expect } from "bun:test";
import { resolve } from "node:path";
@@ -24,36 +23,58 @@ interface HooksJson {
const hooksJson: HooksJson = await import(resolve(ROOT, "hooks/hooks.json"));
-describe("hooks.json SubagentStart", () => {
- const groups = hooksJson.hooks.SubagentStart;
+describe("hooks.json lightweight default", () => {
+ test("does not register pretool skill injection by default", () => {
+ const groups = hooksJson.hooks.PreToolUse ?? [];
+ const hasSkillInjection = groups.some((group) =>
+ group.hooks.some((hook) => hook.command.includes("pretooluse-skill-inject.mjs")),
+ );
- test("array exists with at least one entry", () => {
- expect(Array.isArray(groups)).toBe(true);
- expect(groups.length).toBeGreaterThanOrEqual(1);
+ expect(hasSkillInjection).toBe(false);
});
- test("matcher is '.+'", () => {
- expect(groups[0].matcher).toBe(".+");
+ test("does not register user-prompt skill injection by default", () => {
+ const groups = hooksJson.hooks.UserPromptSubmit ?? [];
+ const hasSkillInjection = groups.some((group) =>
+ group.hooks.some((hook) => hook.command.includes("user-prompt-submit-skill-inject.mjs")),
+ );
+
+ expect(hasSkillInjection).toBe(false);
});
- test("hook has timeout set to 5", () => {
- expect(groups[0].hooks[0].timeout).toBe(5);
+ test("does not register prompt telemetry hooks by default", () => {
+ const groups = hooksJson.hooks.UserPromptSubmit ?? [];
+ const hasPromptTelemetry = groups.some((group) =>
+ group.hooks.some((hook) => hook.command.includes("user-prompt-submit-telemetry.mjs")),
+ );
+
+ expect(hasPromptTelemetry).toBe(false);
});
-});
-describe("hooks.json SubagentStop", () => {
- const groups = hooksJson.hooks.SubagentStop;
+ test("does not register post-tool injection hooks by default", () => {
+ const groups = hooksJson.hooks.PostToolUse ?? [];
+ const hasPostToolInjection = groups.some((group) =>
+ group.hooks.some((hook) =>
+ hook.command.includes("posttooluse-bash-chain.mjs")
+ || hook.command.includes("posttooluse-validate.mjs")
+ || hook.command.includes("posttooluse-shadcn-font-fix.mjs"),
+ ),
+ );
- test("array exists with at least one entry", () => {
- expect(Array.isArray(groups)).toBe(true);
- expect(groups.length).toBeGreaterThanOrEqual(1);
+ expect(hasPostToolInjection).toBe(false);
});
- test("matcher is '.+'", () => {
- expect(groups[0].matcher).toBe(".+");
+ test("does not register verification observer by default", () => {
+ const groups = hooksJson.hooks.PostToolUse ?? [];
+ const hasVerificationObserver = groups.some((group) =>
+ group.hooks.some((hook) => hook.command.includes("posttooluse-verification-observe.mjs")),
+ );
+
+ expect(hasVerificationObserver).toBe(false);
});
- test("hook has timeout set to 5", () => {
- expect(groups[0].hooks[0].timeout).toBe(5);
+ test("does not register subagent bootstrap hooks by default", () => {
+ expect(hooksJson.hooks.SubagentStart ?? []).toEqual([]);
+ expect(hooksJson.hooks.SubagentStop ?? []).toEqual([]);
});
});
diff --git a/tests/inject-claude-md.test.ts b/tests/inject-claude-md.test.ts
index 059df86..4701ea9 100644
--- a/tests/inject-claude-md.test.ts
+++ b/tests/inject-claude-md.test.ts
@@ -1,8 +1,11 @@
-import { describe, test, expect } from "bun:test";
+import { afterEach, beforeEach, describe, test, expect } from "bun:test";
+import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from "node:fs";
+import { tmpdir } from "node:os";
import { join, resolve } from "node:path";
const ROOT = resolve(import.meta.dirname, "..");
const HOOK_SCRIPT = join(ROOT, "hooks", "inject-claude-md.mjs");
+let tempDir: string;
async function runHook(
payload: Record,
@@ -33,6 +36,14 @@ async function runHook(
}
describe("inject-claude-md", () => {
+ beforeEach(() => {
+ tempDir = mkdtempSync(join(tmpdir(), "inject-claude-md-"));
+ });
+
+ afterEach(() => {
+ rmSync(tempDir, { recursive: true, force: true });
+ });
+
test("injects thin session context instead of the full vercel ecosystem graph", async () => {
const { code, stdout } = await runHook({ session_id: "inject-thin-session" });
expect(code).toBe(0);
@@ -50,6 +61,33 @@ describe("inject-claude-md", () => {
expect(stdout).toContain("Greenfield execution mode");
});
+ test("skips injection for non-empty non-vercel projects", async () => {
+ const projectDir = join(tempDir, "plain-project");
+ mkdirSync(projectDir);
+ writeFileSync(join(projectDir, "README.md"), "# Plain project");
+
+ const { code, stdout } = await runHook(
+ { session_id: "inject-thin-skip" },
+ { CLAUDE_PROJECT_ROOT: projectDir },
+ );
+
+ expect(code).toBe(0);
+ expect(stdout.trim()).toBe("");
+ });
+
+ test("still injects for empty directories", async () => {
+ const projectDir = join(tempDir, "greenfield-project");
+ mkdirSync(projectDir);
+
+ const { code, stdout } = await runHook(
+ { session_id: "inject-thin-empty" },
+ { CLAUDE_PROJECT_ROOT: projectDir },
+ );
+
+ expect(code).toBe(0);
+ expect(stdout).toContain("Vercel Plugin Session Context");
+ });
+
test("cursor payload returns flat JSON with thin additional context", async () => {
const { code, stdout } = await runHook({
conversation_id: "inject-thin-cursor",
diff --git a/tests/posttooluse-chain.test.ts b/tests/posttooluse-chain.test.ts
deleted file mode 100644
index a4bcb6a..0000000
--- a/tests/posttooluse-chain.test.ts
+++ /dev/null
@@ -1,4699 +0,0 @@
-/**
- * Tests for PostToolUse chain injection (chainTo rules).
- *
- * Covers:
- * - chainTo match triggers skill injection in additionalContext
- * - already-seen skill is NOT re-injected via chain
- * - chain depth limit (single hop — no recursive chaining)
- * - chainTo with no matches produces no additionalContext
- * - multiple chainTo matches inject only highest-priority target (first match per target)
- * - byte budget is respected for chained content
- */
-
-import { describe, test, expect, beforeEach } from "bun:test";
-import { existsSync, mkdirSync, rmSync, writeFileSync } from "node:fs";
-import { join, resolve } from "node:path";
-import { tmpdir } from "node:os";
-
-const ROOT = resolve(import.meta.dirname, "..");
-const HOOK_SCRIPT = join(ROOT, "hooks", "posttooluse-validate.mjs");
-
-// Unique session ID per test run
-let testSession: string;
-
-beforeEach(() => {
- testSession = `chain-test-${Date.now()}-${Math.random().toString(36).slice(2)}`;
-});
-
-/**
- * Extract postValidation metadata from additionalContext.
- */
-function extractPostValidation(hookSpecificOutput: any): any {
- const ctx = hookSpecificOutput?.additionalContext || "";
- const match = ctx.match(//);
- if (!match) return undefined;
- try { return JSON.parse(match[1]); } catch { return undefined; }
-}
-
-async function runHook(
- input: object,
- extraEnv?: Record,
-): Promise<{ code: number; stdout: string; stderr: string; parsed: any; ctx: string }> {
- const payload = JSON.stringify({ ...input, session_id: testSession });
- const proc = Bun.spawn(["node", HOOK_SCRIPT], {
- stdin: "pipe",
- stdout: "pipe",
- stderr: "pipe",
- env: {
- ...process.env,
- VERCEL_PLUGIN_VALIDATED_FILES: "",
- VERCEL_PLUGIN_SEEN_SKILLS: "",
- ...extraEnv,
- },
- });
- proc.stdin.write(payload);
- proc.stdin.end();
- const code = await proc.exited;
- const stdout = await new Response(proc.stdout).text();
- const stderr = await new Response(proc.stderr).text();
- let parsed: any = {};
- let ctx = "";
- try {
- parsed = JSON.parse(stdout);
- ctx = parsed?.hookSpecificOutput?.additionalContext || "";
- } catch {}
- return { code, stdout, stderr, parsed, ctx };
-}
-
-// ---------------------------------------------------------------------------
-// Unit tests for runChainInjection (imported from compiled module)
-// ---------------------------------------------------------------------------
-
-describe("runChainInjection unit tests", () => {
- let runChainInjection: typeof import("../hooks/src/posttooluse-validate.mts").runChainInjection;
- let formatOutput: typeof import("../hooks/src/posttooluse-validate.mts").formatOutput;
-
- beforeEach(async () => {
- const mod = await import("../hooks/posttooluse-validate.mjs");
- runChainInjection = mod.runChainInjection;
- formatOutput = mod.formatOutput;
- });
-
- test("chainTo match triggers skill injection", () => {
- // Use a small target skill (micro ~3KB) to stay within 18KB budget
- const chainMap = new Map([
- ["test-source", [
- {
- pattern: "SOME_PATTERN",
- targetSkill: "micro",
- message: "Loading micro guidance.",
- },
- ]],
- ]);
-
- const fileContent = `const x = SOME_PATTERN;\n`;
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runChainInjection(
- fileContent,
- ["test-source"],
- chainMap,
- null, // no session (skip dedup file ops)
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].sourceSkill).toBe("test-source");
- expect(result.injected[0].targetSkill).toBe("micro");
- expect(result.injected[0].message).toBe("Loading micro guidance.");
- expect(result.injected[0].content.length).toBeGreaterThan(0);
- expect(result.totalBytes).toBeGreaterThan(0);
- });
-
- test("already-seen skill is NOT re-injected via chain", () => {
- const chainMap = new Map([
- ["test-source", [
- {
- pattern: "TRIGGER_PATTERN",
- targetSkill: "micro",
- },
- ]],
- ]);
-
- const fileContent = `const x = TRIGGER_PATTERN;\n`;
- const fakeEnv: any = {
- VERCEL_PLUGIN_SEEN_SKILLS: "micro",
- };
-
- const result = runChainInjection(
- fileContent,
- ["test-source"],
- chainMap,
- null,
- ROOT,
- undefined,
- fakeEnv,
- );
-
- expect(result.injected.length).toBe(0);
- expect(result.totalBytes).toBe(0);
- });
-
- test("chain depth is limited to 1 hop (no recursive chaining)", () => {
- // Simulate: source-a chains to micro, which itself has chainTo rules.
- // runChainInjection only processes the matchedSkills passed in — it does NOT
- // recursively process chainTo rules of injected targets. This is the "single hop" guarantee.
- const chainMap = new Map([
- ["source-a", [
- {
- pattern: "TRIGGER",
- targetSkill: "micro", // ~3KB, fits in budget
- },
- ]],
- // micro also has a chain rule, but it should NOT fire
- // because micro is not in matchedSkills
- ["micro", [
- {
- pattern: ".*",
- targetSkill: "cron-jobs",
- },
- ]],
- ]);
-
- const fileContent = `const x = TRIGGER;\n`;
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runChainInjection(
- fileContent,
- ["source-a"], // only source-a is matched — micro is NOT in matchedSkills
- chainMap,
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- // Only micro should be injected (from source-a chain), NOT cron-jobs
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].targetSkill).toBe("micro");
- // Confirm cron-jobs was NOT injected
- expect(result.injected.every((i) => i.targetSkill !== "cron-jobs")).toBe(true);
- });
-
- test("chainTo with no matches produces no injections", () => {
- const chainMap = new Map([
- ["test-source", [
- {
- pattern: "SOMETHING_THAT_WONT_MATCH",
- targetSkill: "micro",
- },
- ]],
- ]);
-
- const fileContent = `import { generateText } from 'ai';\n`;
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runChainInjection(
- fileContent,
- ["test-source"],
- chainMap,
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(0);
- expect(result.totalBytes).toBe(0);
- });
-
- test("multiple chainTo matches inject only one entry per target skill (first wins)", () => {
- const chainMap = new Map([
- ["test-source", [
- {
- pattern: "FIRST_MATCH",
- targetSkill: "micro",
- message: "First chain rule",
- },
- {
- pattern: "SECOND_MATCH",
- targetSkill: "micro",
- message: "Second chain rule — same target, should be deduped",
- },
- ]],
- ]);
-
- const fileContent = `const a = FIRST_MATCH;\nconst b = SECOND_MATCH;\n`;
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runChainInjection(
- fileContent,
- ["test-source"],
- chainMap,
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- // Only one injection for micro (first match wins)
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].targetSkill).toBe("micro");
- expect(result.injected[0].message).toBe("First chain rule");
- });
-
- test("byte budget is respected for chained content", () => {
- // micro (~3KB) + swr (~5KB) + cron-jobs (~2KB) = ~10KB — all fit within 18KB budget
- // Raise cap to 10 so budget is the limiting factor, not the cap
- const chainMap = new Map([
- ["source-a", [
- {
- pattern: "PATTERN_A",
- targetSkill: "micro", // ~3KB
- },
- ]],
- ["source-b", [
- {
- pattern: "PATTERN_B",
- targetSkill: "swr", // ~5KB
- },
- ]],
- ["source-c", [
- {
- pattern: "PATTERN_C",
- targetSkill: "cron-jobs", // ~2KB
- },
- ]],
- ]);
-
- const fileContent = `PATTERN_A;\nPATTERN_B;\nPATTERN_C;\n`;
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "", VERCEL_PLUGIN_CHAIN_CAP: "10" };
-
- const result = runChainInjection(
- fileContent,
- ["source-a", "source-b", "source-c"],
- chainMap,
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.totalBytes).toBeLessThanOrEqual(18_000);
- expect(result.totalBytes).toBeGreaterThan(0);
- // All three should fit within budget (cap raised to 10)
- expect(result.injected.length).toBe(3);
- });
-
- test("chainTo with nonexistent target skill is skipped gracefully", () => {
- const chainMap = new Map([
- ["test-source", [
- {
- pattern: "TRIGGER",
- targetSkill: "nonexistent-skill-that-does-not-exist",
- },
- ]],
- ]);
-
- const fileContent = `const x = TRIGGER;\n`;
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runChainInjection(
- fileContent,
- ["test-source"],
- chainMap,
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(0);
- });
-
- test("chain injection is capped at VERCEL_PLUGIN_CHAIN_CAP (default 2)", () => {
- // Three distinct targets, but cap is 2
- const chainMap = new Map([
- ["source-a", [
- { pattern: "PAT_A", targetSkill: "micro" },
- ]],
- ["source-b", [
- { pattern: "PAT_B", targetSkill: "swr" },
- ]],
- ["source-c", [
- { pattern: "PAT_C", targetSkill: "cron-jobs" },
- ]],
- ]);
-
- const fileContent = `PAT_A;\nPAT_B;\nPAT_C;\n`;
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runChainInjection(
- fileContent,
- ["source-a", "source-b", "source-c"],
- chainMap,
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- // Default cap is 2
- expect(result.injected.length).toBe(2);
- });
-
- test("chain cap is configurable via VERCEL_PLUGIN_CHAIN_CAP env var", () => {
- const chainMap = new Map([
- ["source-a", [
- { pattern: "PAT_A", targetSkill: "micro" },
- ]],
- ["source-b", [
- { pattern: "PAT_B", targetSkill: "swr" },
- ]],
- ["source-c", [
- { pattern: "PAT_C", targetSkill: "cron-jobs" },
- ]],
- ]);
-
- const fileContent = `PAT_A;\nPAT_B;\nPAT_C;\n`;
-
- // Cap set to 1
- const envCap1: any = { VERCEL_PLUGIN_SEEN_SKILLS: "", VERCEL_PLUGIN_CHAIN_CAP: "1" };
- const result1 = runChainInjection(
- fileContent,
- ["source-a", "source-b", "source-c"],
- chainMap,
- null,
- ROOT,
- undefined,
- envCap1,
- );
- expect(result1.injected.length).toBe(1);
-
- // Cap set to 10 — all 3 should be injected
- const envCap10: any = { VERCEL_PLUGIN_SEEN_SKILLS: "", VERCEL_PLUGIN_CHAIN_CAP: "10" };
- const result10 = runChainInjection(
- fileContent,
- ["source-a", "source-b", "source-c"],
- chainMap,
- null,
- ROOT,
- undefined,
- envCap10,
- );
- expect(result10.injected.length).toBe(3);
- });
-
- test("skipIfFileContains skips chain rule when file matches the guard regex", () => {
- const chainMap = new Map([
- ["test-source", [
- {
- pattern: "@vercel/postgres",
- targetSkill: "micro",
- skipIfFileContains: "@neondatabase/serverless",
- },
- ]],
- ]);
-
- // File already uses the replacement — chain should NOT fire
- const fileContent = `import { neon } from '@neondatabase/serverless';\nimport { sql } from '@vercel/postgres';\n`;
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runChainInjection(
- fileContent,
- ["test-source"],
- chainMap,
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(0);
- });
-
- test("skipIfFileContains does NOT skip when guard regex does not match", () => {
- const chainMap = new Map([
- ["test-source", [
- {
- pattern: "@vercel/postgres",
- targetSkill: "micro",
- skipIfFileContains: "@neondatabase/serverless",
- },
- ]],
- ]);
-
- // File uses deprecated import but NOT the replacement — chain should fire
- const fileContent = `import { sql } from '@vercel/postgres';\n`;
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runChainInjection(
- fileContent,
- ["test-source"],
- chainMap,
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].targetSkill).toBe("micro");
- });
-
- test("loop prevention: A→B chain is blocked when B is already seen", () => {
- // Simulate: skill A chains to B, but B is already seen
- const chainMap = new Map([
- ["skill-a", [
- { pattern: "TRIGGER", targetSkill: "micro" },
- ]],
- ]);
-
- const fileContent = `const x = TRIGGER;\n`;
- const envWithSeen: any = { VERCEL_PLUGIN_SEEN_SKILLS: "micro" };
- const result = runChainInjection(
- fileContent,
- ["skill-a"],
- chainMap,
- null,
- ROOT,
- undefined,
- envWithSeen,
- );
-
- expect(result.injected.length).toBe(0);
- });
-
- test("loop prevention: bidirectional A↔B only injects once", () => {
- // A chains to B, B chains to A — but A is already a matched skill (seen)
- const chainMap = new Map([
- ["skill-a", [
- { pattern: "TRIGGER_B", targetSkill: "micro" },
- ]],
- ["micro", [
- { pattern: "TRIGGER_A", targetSkill: "skill-a" },
- ]],
- ]);
-
- const fileContent = `TRIGGER_B;\nTRIGGER_A;\n`;
- // skill-a is already seen (it was the matched skill that triggered this PostToolUse)
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "skill-a" };
- const result = runChainInjection(
- fileContent,
- ["skill-a"], // only skill-a is in matchedSkills — micro is NOT
- chainMap,
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- // micro gets injected (skill-a chains to it)
- // But micro's chain back to skill-a doesn't fire because micro isn't in matchedSkills
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].targetSkill).toBe("micro");
- });
-
- test("chainTo with invalid regex pattern is skipped gracefully", () => {
- const chainMap = new Map([
- ["test-source", [
- {
- pattern: "[invalid(regex",
- targetSkill: "micro",
- },
- ]],
- ]);
-
- const fileContent = `const x = TRIGGER;\n`;
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runChainInjection(
- fileContent,
- ["test-source"],
- chainMap,
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(0);
- });
-});
-
-// ---------------------------------------------------------------------------
-// Real-world chain/validate scenario tests (use actual skill frontmatter)
-// ---------------------------------------------------------------------------
-
-describe("real-world chain and validate scenarios", () => {
- let loadValidateRules: typeof import("../hooks/src/posttooluse-validate.mts").loadValidateRules;
- let matchFileToSkills: typeof import("../hooks/src/posttooluse-validate.mts").matchFileToSkills;
- let runValidation: typeof import("../hooks/src/posttooluse-validate.mts").runValidation;
- let runChainInjection: typeof import("../hooks/src/posttooluse-validate.mts").runChainInjection;
-
- let data: NonNullable>;
-
- beforeEach(async () => {
- const mod = await import("../hooks/posttooluse-validate.mjs");
- loadValidateRules = mod.loadValidateRules;
- matchFileToSkills = mod.matchFileToSkills;
- runValidation = mod.runValidation;
- runChainInjection = mod.runChainInjection;
-
- const loaded = loadValidateRules(ROOT);
- if (!loaded) throw new Error("loadValidateRules returned null — no skills with validate/chainTo rules");
- data = loaded;
- });
-
- test("workflow file with DurableAgent import (no ai-sdk) triggers ai-sdk chain", () => {
- const filePath = "/project/workflows/review.ts";
- const fileContent = [
- `import { DurableAgent } from "@workflow/ai/agent";`,
- `import { createWorkflow } from "workflow";`,
- ``,
- `const wf = createWorkflow({ id: "review" });`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("workflow");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- expect(chainResult.injected.length).toBeGreaterThanOrEqual(1);
- const aiSdkChain = chainResult.injected.find((i) => i.targetSkill === "ai-sdk");
- expect(aiSdkChain).toBeDefined();
- expect(aiSdkChain!.sourceSkill).toBe("workflow");
- expect(aiSdkChain!.content.length).toBeGreaterThan(0);
- });
-
- test("turbo.json with 'pipeline' key triggers turborepo validate error with upgradeToSkill", () => {
- const filePath = "/project/turbo.json";
- const fileContent = JSON.stringify({
- "$schema": "https://turbo.build/schema.json",
- "pipeline": {
- "build": { "dependsOn": ["^build"] },
- "lint": {},
- },
- }, null, 2);
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("turborepo");
-
- const violations = runValidation(fileContent, matched, data.rulesMap);
- const pipelineViolation = violations.find((v) => v.skill === "turborepo" && v.matchedText.includes("pipeline"));
- expect(pipelineViolation).toBeDefined();
- expect(pipelineViolation!.severity).toBe("error");
- expect(pipelineViolation!.upgradeToSkill).toBe("turborepo");
- expect(pipelineViolation!.message).toContain("tasks");
- });
-
- test("file with generateObject( triggers ai-sdk validate error", () => {
- const filePath = "/project/app/api/extract/route.ts";
- const fileContent = [
- `import { generateObject } from 'ai';`,
- `import { z } from 'zod';`,
- ``,
- `const result = await generateObject({`,
- ` model: 'openai/gpt-5.4',`,
- ` schema: z.object({ name: z.string() }),`,
- ` prompt: 'Extract the name',`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("ai-sdk");
-
- const violations = runValidation(fileContent, matched, data.rulesMap);
- const genObjViolation = violations.find((v) => v.skill === "ai-sdk" && v.matchedText.includes("generateObject"));
- expect(genObjViolation).toBeDefined();
- expect(genObjViolation!.severity).toBe("error");
- expect(genObjViolation!.message).toContain("Output.object");
- });
-
- test("file with maxSteps: triggers ai-sdk validate recommendation", () => {
- const filePath = "/project/app/api/agent/route.ts";
- const fileContent = [
- `import { streamText } from 'ai';`,
- ``,
- `const result = streamText({`,
- ` model: 'openai/gpt-5.4',`,
- ` maxSteps: 5,`,
- ` prompt: 'Plan a trip',`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("ai-sdk");
-
- const violations = runValidation(fileContent, matched, data.rulesMap);
- const maxStepsViolation = violations.find((v) => v.skill === "ai-sdk" && v.matchedText.includes("maxSteps"));
- expect(maxStepsViolation).toBeDefined();
- expect(maxStepsViolation!.severity).toBe("recommended");
- expect(maxStepsViolation!.upgradeToSkill).toBe("ai-sdk");
- expect(maxStepsViolation!.message).toContain("stepCountIs");
- });
-
- test("file with dall-e reference triggers ai-gateway upgrade", () => {
- const filePath = "/project/app/api/image/route.ts";
- const fileContent = [
- `import { generateText } from 'ai';`,
- ``,
- `const result = await generateText({`,
- ` model: 'openai/dall-e-3',`,
- ` prompt: 'A sunset over mountains',`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("ai-sdk");
-
- const violations = runValidation(fileContent, matched, data.rulesMap);
- const dalleViolation = violations.find((v) => v.skill === "ai-sdk" && v.matchedText.includes("dall-e"));
- expect(dalleViolation).toBeDefined();
- expect(dalleViolation!.severity).toBe("recommended");
- expect(dalleViolation!.upgradeToSkill).toBe("ai-gateway");
- expect(dalleViolation!.message).toContain("gemini-3.1-flash-image-preview");
- });
-
- test("file with toDataStreamResponse triggers ai-sdk validate recommendation", () => {
- const filePath = "/project/app/api/chat/route.ts";
- const fileContent = [
- `import { streamText } from 'ai';`,
- ``,
- `export async function POST(req: Request) {`,
- ` const result = streamText({`,
- ` model: 'openai/gpt-5.4',`,
- ` prompt: 'Hello!',`,
- ` });`,
- ` return result.toDataStreamResponse();`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("ai-sdk");
-
- const violations = runValidation(fileContent, matched, data.rulesMap);
- const tdsViolation = violations.find((v) => v.skill === "ai-sdk" && v.matchedText.includes("toDataStreamResponse"));
- expect(tdsViolation).toBeDefined();
- expect(tdsViolation!.severity).toBe("recommended");
- expect(tdsViolation!.message).toContain("toUIMessageStreamResponse");
- });
-
- // -------------------------------------------------------------------
- // New chainTo coverage: diverse cross-skill chain scenarios
- // -------------------------------------------------------------------
-
- test("vercel-storage file with @vercel/postgres import chains to nextjs", () => {
- const filePath = "/project/lib/db.ts";
- const fileContent = [
- `import { sql } from '@vercel/postgres';`,
- ``,
- `export async function getUsers() {`,
- ` const { rows } = await sql\`SELECT * FROM users\`;`,
- ` return rows;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("vercel-storage");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const nextjsChain = chainResult.injected.find((i) => i.targetSkill === "nextjs");
- expect(nextjsChain).toBeDefined();
- expect(nextjsChain!.sourceSkill).toBe("vercel-storage");
- expect(nextjsChain!.message).toContain("@vercel/postgres");
- expect(nextjsChain!.message).toContain("sunset");
- });
-
- test("shadcn file with react-markdown chains to ai-elements", () => {
- const filePath = "/project/components/chat-display.tsx";
- const fileContent = [
- `import ReactMarkdown from 'react-markdown';`,
- `import { cn } from '@/lib/utils';`,
- ``,
- `export function ChatBubble({ text }: { text: string }) {`,
- ` return {text};`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("shadcn");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const aiElementsChain = chainResult.injected.find((i) => i.targetSkill === "ai-elements");
- expect(aiElementsChain).toBeDefined();
- expect(aiElementsChain!.sourceSkill).toBe("shadcn");
- expect(aiElementsChain!.message).toContain("markdown");
- });
-
- test("shadcn react-markdown chain is skipped when ai-elements already imported (skipIfFileContains)", () => {
- const filePath = "/project/components/chat-display.tsx";
- const fileContent = [
- `import ReactMarkdown from 'react-markdown';`,
- `import { MessageResponse } from '@/components/ai-elements/message';`,
- `import { cn } from '@/lib/utils';`,
- ``,
- `export function ChatBubble({ text }: { text: string }) {`,
- ` return ;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("shadcn");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: 'ai-elements|MessageResponse| i.sourceSkill === "shadcn" && i.targetSkill === "ai-elements",
- );
- expect(aiElementsChain).toBeUndefined();
- });
-
- test("routing-middleware file with IP blocklist chains to vercel-firewall", () => {
- const filePath = "/project/middleware.ts";
- const fileContent = [
- `import { NextRequest, NextResponse } from 'next/server';`,
- ``,
- `const blockedIps = ['1.2.3.4', '5.6.7.8'];`,
- ``,
- `export function middleware(req: NextRequest) {`,
- ` const ip = req.ip || '';`,
- ` if (blockedIps.includes(ip)) return NextResponse.json({}, { status: 403 });`,
- ` return NextResponse.next();`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("routing-middleware");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const firewallChain = chainResult.injected.find((i) => i.targetSkill === "vercel-firewall");
- expect(firewallChain).toBeDefined();
- expect(firewallChain!.sourceSkill).toBe("routing-middleware");
- expect(firewallChain!.message).toContain("Firewall");
- });
-
- test("cron-jobs file with node-cron import chains to vercel-functions", () => {
- const filePath = "/project/lib/scheduler.ts";
- const fileContent = [
- `import cron from 'node-cron';`,
- ``,
- `cron.schedule('0 */6 * * *', async () => {`,
- ` await syncExternalData();`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("cron-jobs");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const functionsChain = chainResult.injected.find((i) => i.targetSkill === "vercel-functions");
- expect(functionsChain).toBeDefined();
- expect(functionsChain!.sourceSkill).toBe("cron-jobs");
- expect(functionsChain!.message).toContain("cron");
- });
-
- test("react-best-practices file with axios import chains to swr", () => {
- const filePath = "/project/components/UserList.tsx";
- const fileContent = [
- `import React, { useEffect, useState } from 'react';`,
- `import axios from 'axios';`,
- ``,
- `export function UserList() {`,
- ` const [users, setUsers] = useState([]);`,
- ` useEffect(() => {`,
- ` axios.get('/api/users').then(res => setUsers(res.data));`,
- ` }, []);`,
- ` return {users.map(u => - {u.name}
)}
;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("react-best-practices");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const swrChain = chainResult.injected.find((i) => i.targetSkill === "swr");
- expect(swrChain).toBeDefined();
- expect(swrChain!.sourceSkill).toBe("react-best-practices");
- expect(swrChain!.message).toContain("axios");
- });
-
- test("payments file with setTimeout chains to workflow", () => {
- const filePath = "/project/app/api/webhooks/stripe/route.ts";
- const fileContent = [
- `import Stripe from 'stripe';`,
- ``,
- `export async function POST(req: Request) {`,
- ` const event = await parseWebhook(req);`,
- ` if (event.type === 'payment_intent.succeeded') {`,
- ` // Retry with backoff`,
- ` setTimeout(() => fulfillOrder(event.data.object), 5000);`,
- ` }`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("payments");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const workflowChain = chainResult.injected.find((i) => i.targetSkill === "workflow");
- expect(workflowChain).toBeDefined();
- expect(workflowChain!.sourceSkill).toBe("payments");
- expect(workflowChain!.message).toContain("Workflow");
- });
-
- test("env-vars file with ANTHROPIC_API_KEY chains to ai-gateway", () => {
- const filePath = "/project/.env.local";
- const fileContent = [
- `# AI provider keys`,
- `ANTHROPIC_API_KEY=sk-ant-api...`,
- `DATABASE_URL=postgres://...`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("env-vars");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const gatewayChain = chainResult.injected.find((i) => i.targetSkill === "ai-gateway");
- expect(gatewayChain).toBeDefined();
- expect(gatewayChain!.sourceSkill).toBe("env-vars");
- expect(gatewayChain!.message).toContain("OIDC");
- });
-
- // -------------------------------------------------------------------
- // Additional chainTo coverage: nextjs chains
- // -------------------------------------------------------------------
-
- test("nextjs file with middleware export chains to routing-middleware", () => {
- const filePath = "/project/middleware.ts";
- const fileContent = [
- `import { NextResponse } from 'next/server';`,
- ``,
- `export default function middleware(req) {`,
- ` return NextResponse.next();`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("nextjs");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const routingChain = chainResult.injected.find((i) => i.targetSkill === "routing-middleware");
- expect(routingChain).toBeDefined();
- expect(routingChain!.sourceSkill).toBe("nextjs");
- expect(routingChain!.message).toContain("proxy");
- });
-
- test("nextjs file with @ai-sdk/openai chains to ai-gateway", () => {
- const filePath = "/project/app/api/chat/route.ts";
- const fileContent = [
- `import { streamText } from 'ai';`,
- `import { openai } from '@ai-sdk/openai';`,
- ``,
- `export async function POST(req: Request) {`,
- ` const result = streamText({ model: openai('gpt-5.4'), prompt: 'Hello' });`,
- ` return result.toUIMessageStreamResponse();`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("nextjs");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const gatewayChain = chainResult.injected.find((i) => i.targetSkill === "ai-gateway");
- expect(gatewayChain).toBeDefined();
- expect(gatewayChain!.sourceSkill).toBe("nextjs");
- expect(gatewayChain!.message).toContain("Gateway");
- });
-
- test("nextjs file with next-auth chains to auth", () => {
- const filePath = "/project/app/api/auth/[...nextauth]/route.ts";
- const fileContent = [
- `import NextAuth from 'next-auth';`,
- `import { authOptions } from '@/lib/auth';`,
- ``,
- `const handler = NextAuth(authOptions);`,
- `export { handler as GET, handler as POST };`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("nextjs");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const authChain = chainResult.injected.find((i) => i.targetSkill === "auth");
- expect(authChain).toBeDefined();
- expect(authChain!.sourceSkill).toBe("nextjs");
- });
-
- test("nextjs file with NextApiRequest chains to vercel-functions", () => {
- const filePath = "/project/pages/api/users.ts";
- const fileContent = [
- `import type { NextApiRequest, NextApiResponse } from 'next';`,
- ``,
- `export default function handler(req: NextApiRequest, res: NextApiResponse) {`,
- ` res.status(200).json({ users: [] });`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("nextjs");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const functionsChain = chainResult.injected.find((i) => i.targetSkill === "vercel-functions");
- expect(functionsChain).toBeDefined();
- expect(functionsChain!.sourceSkill).toBe("nextjs");
- });
-
- test("nextjs file with lru-cache chains to runtime-cache", () => {
- const filePath = "/project/lib/cache.ts";
- const fileContent = [
- `import { LRUCache } from 'lru-cache';`,
- ``,
- `const cache = new LRUCache({ max: 500, ttl: 60000 });`,
- `export default cache;`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- // Should match nextjs via import or another skill
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const cacheChain = chainResult.injected.find((i) => i.targetSkill === "runtime-cache");
- if (cacheChain) {
- expect(cacheChain.message).toContain("cache");
- }
- });
-
- test("nextjs file with JWT handling chains to auth (skipIfFileContains)", () => {
- const filePath = "/project/lib/auth.ts";
- const fileContent = [
- `import jwt from 'jsonwebtoken';`,
- ``,
- `export function verifyToken(token: string) {`,
- ` return jwt.verify(token, process.env.JWT_SECRET!);`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const authChain = chainResult.injected.find((i) => i.targetSkill === "auth");
- if (authChain) {
- expect(authChain.message).toContain("Auth");
- }
- });
-
- test("nextjs JWT chain is skipped when Clerk is already imported (skipIfFileContains)", () => {
- const filePath = "/project/lib/auth.ts";
- const fileContent = [
- `import { clerkMiddleware } from '@clerk/nextjs/server';`,
- `import jwt from 'jsonwebtoken';`,
- ``,
- `// Legacy verification for migration`,
- `export function verifyToken(token: string) {`,
- ` return jwt.verify(token, process.env.JWT_SECRET!);`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains should suppress the auth chain since clerkMiddleware is present
- const authChainFromNextjs = chainResult.injected.find(
- (i) => i.sourceSkill === "nextjs" && i.targetSkill === "auth" && i.message?.includes("JWT"),
- );
- expect(authChainFromNextjs).toBeUndefined();
- });
-
- // -------------------------------------------------------------------
- // Additional chainTo coverage: vercel-functions chains
- // -------------------------------------------------------------------
-
- test("vercel-functions file with direct OpenAI SDK chains to ai-sdk", () => {
- const filePath = "/project/app/api/generate/route.ts";
- const fileContent = [
- `import OpenAI from 'openai';`,
- ``,
- `const openai = new OpenAI();`,
- `export async function POST(req: Request) {`,
- ` const completion = await openai.chat.completions.create({`,
- ` model: 'gpt-5.4',`,
- ` messages: [{ role: 'user', content: 'Hello' }],`,
- ` });`,
- ` return Response.json(completion);`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("vercel-functions");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const aiSdkChain = chainResult.injected.find(
- (i) => i.sourceSkill === "vercel-functions" && i.targetSkill === "ai-sdk",
- );
- expect(aiSdkChain).toBeDefined();
- expect(aiSdkChain!.message).toContain("AI SDK");
- });
-
- test("vercel-functions file with writeFile chains to vercel-storage", () => {
- const filePath = "/project/app/api/upload/route.ts";
- const fileContent = [
- `import { writeFileSync } from 'node:fs';`,
- ``,
- `export async function POST(req: Request) {`,
- ` const data = await req.arrayBuffer();`,
- ` writeFileSync('/tmp/upload.bin', Buffer.from(data));`,
- ` return Response.json({ ok: true });`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("vercel-functions");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const storageChain = chainResult.injected.find(
- (i) => i.sourceSkill === "vercel-functions" && i.targetSkill === "vercel-storage",
- );
- expect(storageChain).toBeDefined();
- expect(storageChain!.message).toContain("Storage");
- });
-
- test("vercel-functions file with deprecated AI SDK v5 APIs chains to ai-sdk", () => {
- const filePath = "/project/app/api/extract/route.ts";
- const fileContent = [
- `import { generateObject } from 'ai';`,
- ``,
- `export async function POST(req: Request) {`,
- ` const result = await generateObject({`,
- ` model: 'openai/gpt-5.4',`,
- ` schema: z.object({ name: z.string() }),`,
- ` prompt: 'Extract the name',`,
- ` });`,
- ` return Response.json(result.object);`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("vercel-functions");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const aiSdkChain = chainResult.injected.find(
- (i) => i.sourceSkill === "vercel-functions" && i.targetSkill === "ai-sdk",
- );
- expect(aiSdkChain).toBeDefined();
- expect(aiSdkChain!.message).toContain("v5");
- });
-
- test("vercel-functions polling loop chain is skipped when workflow is already used (skipIfFileContains)", () => {
- const filePath = "/project/app/api/poll/route.ts";
- const fileContent = [
- `'use workflow';`,
- ``,
- `while (true) {`,
- ` const status = await checkStatus();`,
- ` if (status === 'done') break;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains should suppress the workflow chain since 'use workflow' is present
- const workflowChain = chainResult.injected.find(
- (i) => i.sourceSkill === "vercel-functions" && i.targetSkill === "workflow",
- );
- expect(workflowChain).toBeUndefined();
- });
-
- // -------------------------------------------------------------------
- // Additional chainTo coverage: ai-gateway chains
- // -------------------------------------------------------------------
-
- test("ai-gateway file with direct provider SDK import chains to ai-sdk", () => {
- const filePath = "/project/lib/ai.ts";
- const fileContent = [
- `import { anthropic } from '@ai-sdk/anthropic';`,
- `import { generateText } from 'ai';`,
- ``,
- `const result = await generateText({`,
- ` model: anthropic('claude-sonnet-4.6'),`,
- ` prompt: 'Hello!',`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("ai-gateway");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const aiSdkChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-gateway" && i.targetSkill === "ai-sdk",
- );
- expect(aiSdkChain).toBeDefined();
- expect(aiSdkChain!.message).toContain("Gateway");
- });
-
- // -------------------------------------------------------------------
- // Additional chainTo coverage: email chains
- // -------------------------------------------------------------------
-
- test("email file with setTimeout chains to workflow", () => {
- const filePath = "/project/lib/email.ts";
- const fileContent = [
- `import { Resend } from 'resend';`,
- ``,
- `const resend = new Resend(process.env.RESEND_API_KEY);`,
- ``,
- `export async function sendWelcomeEmail(to: string) {`,
- ` // Delay the follow-up email`,
- ` setTimeout(async () => {`,
- ` await resend.emails.send({ from: 'hi@example.com', to, subject: 'Follow up' });`,
- ` }, 86400000);`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("email");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const workflowChain = chainResult.injected.find(
- (i) => i.sourceSkill === "email" && i.targetSkill === "workflow",
- );
- expect(workflowChain).toBeDefined();
- expect(workflowChain!.message).toContain("Workflow");
- });
-
- test("email retry chain is skipped when workflow is already used (skipIfFileContains)", () => {
- const filePath = "/project/lib/email.ts";
- const fileContent = [
- `import { createWorkflow } from 'workflow';`,
- `import { Resend } from 'resend';`,
- ``,
- `let retries = 0;`,
- `const maxRetries = 3;`,
- `try { await send(); } catch { retry(); }`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains should suppress the retry→workflow chain
- const retryChain = chainResult.injected.find(
- (i) => i.sourceSkill === "email" && i.targetSkill === "workflow" && i.message?.includes("retry"),
- );
- expect(retryChain).toBeUndefined();
- });
-
- // -------------------------------------------------------------------
- // Additional chainTo coverage: vercel-queues chains
- // -------------------------------------------------------------------
-
- test("vercel-queues file with BullMQ chains to workflow", () => {
- const filePath = "/project/lib/queue.ts";
- const fileContent = [
- `import { Queue, Worker } from 'bullmq';`,
- ``,
- `const queue = new Queue('email-queue');`,
- `const worker = new Worker('email-queue', async (job) => {`,
- ` await sendEmail(job.data);`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("vercel-queues");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const workflowChain = chainResult.injected.find(
- (i) => i.sourceSkill === "vercel-queues" && i.targetSkill === "workflow",
- );
- expect(workflowChain).toBeDefined();
- expect(workflowChain!.message).toContain("BullMQ");
- });
-
- // -------------------------------------------------------------------
- // Additional chainTo coverage: vercel-firewall chains
- // -------------------------------------------------------------------
-
- test("vercel-firewall file with express-rate-limit chains to routing-middleware", () => {
- const filePath = "/project/lib/rate-limit.ts";
- const fileContent = [
- `import rateLimit from 'express-rate-limit';`,
- ``,
- `export const limiter = rateLimit({`,
- ` windowMs: 15 * 60 * 1000,`,
- ` max: 100,`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("vercel-firewall");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const routingChain = chainResult.injected.find(
- (i) => i.sourceSkill === "vercel-firewall" && i.targetSkill === "routing-middleware",
- );
- expect(routingChain).toBeDefined();
- expect(routingChain!.message).toContain("rate limit");
- });
-
- // -------------------------------------------------------------------
- // Additional chainTo coverage: satori chains
- // -------------------------------------------------------------------
-
- test("satori file with puppeteer chains to vercel-functions", () => {
- const filePath = "/project/lib/og.ts";
- const fileContent = [
- `import puppeteer from 'puppeteer';`,
- ``,
- `export async function generateOG(title: string) {`,
- ` const browser = await puppeteer.launch({ headless: true });`,
- ` const page = await browser.newPage();`,
- ` await page.setContent('' + title + '
');`,
- ` const screenshot = await page.screenshot();`,
- ` await browser.close();`,
- ` return screenshot;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("satori");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const functionsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "satori" && i.targetSkill === "vercel-functions",
- );
- expect(functionsChain).toBeDefined();
- expect(functionsChain!.message).toContain("browser");
- });
-
- // -------------------------------------------------------------------
- // Additional chainTo coverage: vercel-flags chains
- // -------------------------------------------------------------------
-
- test("vercel-flags file with env var feature flags chains to vercel-storage", () => {
- const filePath = "/project/lib/flags.ts";
- const fileContent = [
- `const isNewUI = process.env.FEATURE_NEW_UI === 'true';`,
- `const isEnabled = process.env.ENABLE_DARK_MODE === '1';`,
- ``,
- `export { isNewUI, isEnabled };`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("vercel-flags");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const storageChain = chainResult.injected.find(
- (i) => i.sourceSkill === "vercel-flags" && i.targetSkill === "vercel-storage",
- );
- expect(storageChain).toBeDefined();
- expect(storageChain!.message).toContain("Edge Config");
- });
-
- // -------------------------------------------------------------------
- // Additional chainTo coverage: workflow → ai-elements
- // -------------------------------------------------------------------
-
- test("workflow file with useChat chains to ai-elements (skipIfFileContains guard)", () => {
- const filePath = "/project/app/workflow/chat.tsx";
- const fileContent = [
- `'use client';`,
- `import { useChat } from '@ai-sdk/react';`,
- `import { DefaultChatTransport } from '@ai-sdk/react';`,
- ``,
- `export function WorkflowChat() {`,
- ` const { messages } = useChat({ transport: new DefaultChatTransport({ api: '/api/workflow/chat' }) });`,
- ` return {messages.map(m =>
{m.content}
)}
;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // Should chain to ai-elements since there's no ai-elements import
- const aiElementsChain = chainResult.injected.find(
- (i) => i.targetSkill === "ai-elements",
- );
- if (matched.includes("workflow")) {
- expect(aiElementsChain).toBeDefined();
- expect(aiElementsChain!.message).toContain("AI Elements");
- }
- });
-
- test("workflow file with useChat is skipped when MessageResponse already imported", () => {
- const filePath = "/project/app/workflow/chat.tsx";
- const fileContent = [
- `'use client';`,
- `import { useChat } from '@ai-sdk/react';`,
- `import { MessageResponse } from '@/components/ai-elements/message';`,
- ``,
- `export function WorkflowChat() {`,
- ` const { messages } = useChat({ transport: new DefaultChatTransport({ api: '/api/workflow/chat' }) });`,
- ` return {messages.map(m => )}
;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: 'ai-elements|MessageResponse| i.sourceSkill === "workflow" && i.targetSkill === "ai-elements",
- );
- expect(aiElementsFromWorkflow).toBeUndefined();
- });
-
- // -------------------------------------------------------------------
- // Additional chainTo coverage: geistdocs → nextjs
- // -------------------------------------------------------------------
-
- test("geistdocs file with nextra import chains to nextjs", () => {
- const filePath = "/project/next.config.mjs";
- const fileContent = [
- `import nextra from 'nextra';`,
- ``,
- `const withNextra = nextra({ theme: 'nextra-theme-docs' });`,
- `export default withNextra({});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("geistdocs");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const nextjsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "geistdocs" && i.targetSkill === "nextjs",
- );
- expect(nextjsChain).toBeDefined();
- expect(nextjsChain!.message).toContain("Nextra");
- });
-
- // -------------------------------------------------------------------
- // Additional chainTo coverage: shadcn → ai-elements (dangerouslySetInnerHTML)
- // -------------------------------------------------------------------
-
- test("shadcn file with dangerouslySetInnerHTML chains to ai-elements", () => {
- const filePath = "/project/components/ai-output.tsx";
- const fileContent = [
- `import { cn } from '@/lib/utils';`,
- ``,
- `export function AIOutput({ html }: { html: string }) {`,
- ` return ;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("shadcn");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const aiElementsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "shadcn" && i.targetSkill === "ai-elements",
- );
- expect(aiElementsChain).toBeDefined();
- expect(aiElementsChain!.message).toContain("HTML");
- });
-
- // -------------------------------------------------------------------
- // Additional chainTo coverage: react-best-practices → shadcn
- // -------------------------------------------------------------------
-
- test("react-best-practices file with styled-components chains to shadcn", () => {
- const filePath = "/project/components/Button.tsx";
- const fileContent = [
- `import styled from 'styled-components';`,
- ``,
- `const StyledButton = styled.button\``,
- ` background: blue;`,
- ` color: white;`,
- `\`;`,
- ``,
- `export function Button({ children }: { children: React.ReactNode }) {`,
-    `  return <StyledButton>{children}</StyledButton>;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("react-best-practices");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const shadcnChain = chainResult.injected.find(
- (i) => i.sourceSkill === "react-best-practices" && i.targetSkill === "shadcn",
- );
- expect(shadcnChain).toBeDefined();
- expect(shadcnChain!.message).toContain("shadcn");
- });
-
- test("react-best-practices fetch().then() chain to swr is skipped when useSWR present (skipIfFileContains)", () => {
- const filePath = "/project/components/UserList.tsx";
- const fileContent = [
- `import useSWR from 'swr';`,
- ``,
- `// Legacy fetch still in codebase`,
- `fetch('/api/old').then(res => res.json());`,
- ``,
- `export function UserList() {`,
- ` const { data } = useSWR('/api/users', fetcher);`,
-    `  return <ul>{data?.map(u => <li>{u.name}</li>)}</ul>;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains should suppress since useSWR is present
- const swrChainFromFetch = chainResult.injected.find(
- (i) => i.sourceSkill === "react-best-practices" && i.targetSkill === "swr"
- && i.message?.includes("fetch"),
- );
- expect(swrChainFromFetch).toBeUndefined();
- });
- // -------------------------------------------------------------------------
- // payments chainTo rules
- // -------------------------------------------------------------------------
-
- test("payments file with manual retry logic chains to workflow", () => {
- const filePath = "/project/app/api/checkout/route.ts";
- const fileContent = [
- `import Stripe from 'stripe';`,
- `let retries = 3;`,
- `while (retries > 0) {`,
- ` try { await charge(); break; } catch { retries--; }`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const workflowChain = chainResult.injected.find((i) => i.targetSkill === "workflow");
- expect(workflowChain).toBeDefined();
- });
-
- test("payments retry chain is skipped when workflow already used (skipIfFileContains)", () => {
- const filePath = "/project/app/api/checkout/route.ts";
- const fileContent = [
- `import { createWorkflow } from 'workflow';`,
- `let retries = 3;`,
- `while (retries > 0) { retries--; }`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const workflowRetryChain = chainResult.injected.find(
- (i) => i.targetSkill === "workflow" && i.message?.includes("retry"),
- );
- expect(workflowRetryChain).toBeUndefined();
- });
-
- test("payments file with Stripe webhook chains to payments", () => {
- const filePath = "/project/app/api/webhook/route.ts";
- const fileContent = [
- `import Stripe from 'stripe';`,
- `const event = stripe.webhooks.constructEvent(body, sig, STRIPE_WEBHOOK_SECRET);`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const paymentsChain = chainResult.injected.find((i) => i.targetSkill === "payments");
- expect(paymentsChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // cron-jobs chainTo rules
- // -------------------------------------------------------------------------
-
- test("cron-jobs file with setTimeout chains to workflow", () => {
- const filePath = "/project/app/api/cron/route.ts";
- const fileContent = [
- `export async function GET() {`,
- ` setTimeout(() => processJobs(), 5000);`,
- ` return new Response('OK');`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const workflowChain = chainResult.injected.find((i) => i.targetSkill === "workflow");
- expect(workflowChain).toBeDefined();
- });
-
- test("cron-jobs file with croner import chains to cron-jobs", () => {
- const filePath = "/project/lib/scheduler.ts";
- const fileContent = `import { Cron } from 'croner';\nconst job = new Cron('0 * * * *', () => {});\n`;
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const cronChain = chainResult.injected.find((i) => i.targetSkill === "cron-jobs");
- expect(cronChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // routing-middleware chainTo rules
- // -------------------------------------------------------------------------
-
- test("routing-middleware file with next-auth import chains to auth", () => {
- const filePath = "/project/middleware.ts";
- const fileContent = [
- `import { NextResponse } from 'next/server';`,
- `import { getToken } from 'next-auth/jwt';`,
- `export function middleware(req) { return NextResponse.next(); }`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const authChain = chainResult.injected.find((i) => i.targetSkill === "auth");
- expect(authChain).toBeDefined();
- });
-
- test("routing-middleware NextResponse chain is skipped in proxy.ts context (skipIfFileContains)", () => {
- const filePath = "/project/proxy.ts";
- const fileContent = [
- `import { NextResponse } from 'next/server';`,
- `// runtime nodejs`,
- `export function proxy(req) { return NextResponse.next(); }`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const nextjsChain = chainResult.injected.find(
- (i) => i.targetSkill === "nextjs" && i.message?.includes("proxy.ts"),
- );
- expect(nextjsChain).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // vercel-storage chainTo rules
- // -------------------------------------------------------------------------
-
- test("vercel-storage file with @vercel/kv import chains to nextjs", () => {
- const filePath = "/project/lib/cache.ts";
- const fileContent = `import { kv } from '@vercel/kv';\nconst val = await kv.get('key');\n`;
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const nextjsChain = chainResult.injected.find((i) => i.targetSkill === "nextjs");
- expect(nextjsChain).toBeDefined();
- });
-
- test("vercel-storage file with Supabase import chains to vercel-storage", () => {
- const filePath = "/project/lib/db.ts";
- const fileContent = `import { createClient } from '@supabase/supabase-js';\n`;
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const storageChain = chainResult.injected.find((i) => i.targetSkill === "vercel-storage");
- expect(storageChain).toBeDefined();
- });
-
- test("vercel-storage file with MongoDB import chains to vercel-storage", () => {
- const filePath = "/project/lib/db.ts";
- const fileContent = `import mongoose from 'mongoose';\nawait mongoose.connect(uri);\n`;
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const storageChain = chainResult.injected.find((i) => i.targetSkill === "vercel-storage");
- expect(storageChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // vercel-queues chainTo rules
- // -------------------------------------------------------------------------
-
- test("vercel-queues file with SQS import chains to workflow", () => {
- const filePath = "/project/lib/queue.ts";
- const fileContent = `import { SQSClient } from '@aws-sdk/client-sqs';\n`;
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const workflowChain = chainResult.injected.find((i) => i.targetSkill === "workflow");
- expect(workflowChain).toBeDefined();
- });
-
- test("vercel-queues file with p-queue import chains to vercel-queues", () => {
- const filePath = "/project/lib/workers.ts";
- const fileContent = `import PQueue from 'p-queue';\nconst q = new PQueue({ concurrency: 2 });\n`;
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const queuesChain = chainResult.injected.find((i) => i.targetSkill === "vercel-queues");
- expect(queuesChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // chat-sdk chainTo rules
- // -------------------------------------------------------------------------
-
- test("chat-sdk file with direct OpenAI import chains to ai-sdk", () => {
- const filePath = "/project/lib/bot.ts";
- const fileContent = [
- `import { Chat } from 'chat';`,
- `import OpenAI from 'openai';`,
- `const openai = new OpenAI();`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const aiSdkChain = chainResult.injected.find((i) => i.targetSkill === "ai-sdk");
- expect(aiSdkChain).toBeDefined();
- });
-
- test("chat-sdk file with @slack/bolt import chains to chat-sdk", () => {
- const filePath = "/project/lib/bot.ts";
- const fileContent = [
- `import { App } from '@slack/bolt';`,
- `const app = new App({ token: process.env.SLACK_BOT_TOKEN });`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const chatSdkChain = chainResult.injected.find((i) => i.targetSkill === "chat-sdk");
- expect(chatSdkChain).toBeDefined();
- });
-
- test("chat-sdk file with discord.js import chains to chat-sdk", () => {
- const filePath = "/project/lib/discord-bot.ts";
- const fileContent = `import { Client } from 'discord.js';\nconst client = new Client();\n`;
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const chatSdkChain = chainResult.injected.find((i) => i.targetSkill === "chat-sdk");
- expect(chatSdkChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // email chainTo rules
- // -------------------------------------------------------------------------
-
- test("email file with nodemailer import chains to email", () => {
- const filePath = "/project/lib/mailer.ts";
- const fileContent = [
- `import nodemailer from 'nodemailer';`,
- `const transporter = nodemailer.createTransport({});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const emailChain = chainResult.injected.find((i) => i.targetSkill === "email");
- expect(emailChain).toBeDefined();
- });
-
- test("email file with batch send chains to workflow (skipIfFileContains)", () => {
- const filePath = "/project/lib/campaign.ts";
- const fileContent = [
- `const emails = users.map(u => u.email);`,
- `await Promise.all(emails.map(e => sendEmail(e)));`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // If email skill matched, workflow chain should fire for batch patterns
- const workflowChain = chainResult.injected.find(
- (i) => i.targetSkill === "workflow" && i.message?.includes("batch"),
- );
- // Whether it fires depends on email skill matching — file may or may not match email pathPatterns
- // The important thing: if it fires, it targets workflow
- if (workflowChain) {
- expect(workflowChain.targetSkill).toBe("workflow");
- }
- });
-
- test("email batch chain is skipped when workflow already used", () => {
- const filePath = "/project/lib/campaign.ts";
- const fileContent = [
- `import { createWorkflow } from 'workflow';`,
- `const emails = users.map(u => u.email);`,
- `await Promise.all(emails.map(e => sendEmail(e)));`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const batchChain = chainResult.injected.find(
- (i) => i.targetSkill === "workflow" && i.message?.includes("batch"),
- );
- expect(batchChain).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // auth chainTo rules
- // -------------------------------------------------------------------------
-
- test("auth file with Vercel OAuth env vars chains to sign-in-with-vercel", () => {
- const filePath = "/project/lib/auth.ts";
- const fileContent = [
- `const clientId = process.env.VERCEL_CLIENT_ID;`,
- `const clientSecret = process.env.VERCEL_CLIENT_SECRET;`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const siVercelChain = chainResult.injected.find((i) => i.targetSkill === "sign-in-with-vercel");
- expect(siVercelChain).toBeDefined();
- });
-
- test("auth file with jsonwebtoken import chains to auth", () => {
- const filePath = "/project/lib/auth.ts";
- const fileContent = [
- `import jwt from 'jsonwebtoken';`,
- `const token = jwt.sign({ userId: '123' }, secret);`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const authChain = chainResult.injected.find((i) => i.targetSkill === "auth");
- expect(authChain).toBeDefined();
- });
-
- test("auth file with middleware export chains to routing-middleware", () => {
- const filePath = "/project/middleware.ts";
- const fileContent = [
- `import { clerkMiddleware } from '@clerk/nextjs/server';`,
- `export default function middleware(req) { return clerkMiddleware()(req); }`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const routingChain = chainResult.injected.find((i) => i.targetSkill === "routing-middleware");
- expect(routingChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // swr chainTo rules
- // -------------------------------------------------------------------------
-
- test("swr file with direct AI provider SDK chains to ai-sdk", () => {
- const filePath = "/project/components/dashboard.tsx";
- const fileContent = [
- `import useSWR from 'swr';`,
- `import { OpenAI } from 'openai';`,
- `const openai = new OpenAI();`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const aiSdkChain = chainResult.injected.find((i) => i.targetSkill === "ai-sdk");
- expect(aiSdkChain).toBeDefined();
- });
-
- test("swr file with @vercel/kv chains to vercel-storage", () => {
- const filePath = "/project/lib/data.ts";
- const fileContent = [
- `import useSWR from 'swr';`,
- `import { kv } from '@vercel/kv';`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const storageChain = chainResult.injected.find((i) => i.targetSkill === "vercel-storage");
- expect(storageChain).toBeDefined();
- });
-
- test("swr file with useEffect+fetch chains to swr", () => {
- const filePath = "/project/components/list.tsx";
- const fileContent = [
- `'use client';`,
- `import { useEffect, useState } from 'react';`,
- `useEffect(() => {`,
- ` fetch('/api/items').then(r => r.json()).then(setItems);`,
- `}, []);`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const swrChain = chainResult.injected.find((i) => i.targetSkill === "swr");
- expect(swrChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // ai-elements chainTo rules
- // -------------------------------------------------------------------------
-
- test("ai-elements file with raw message.content chains to ai-sdk", () => {
- const filePath = "/project/components/chat.tsx";
- const fileContent = [
- `'use client';`,
- `import { Message } from '@/components/ai-elements/message';`,
-    `<div>{message.content}</div>`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const aiSdkChain = chainResult.injected.find((i) => i.targetSkill === "ai-sdk");
- expect(aiSdkChain).toBeDefined();
- });
-
- test("ai-elements file with react-markdown chains to ai-elements", () => {
- const filePath = "/project/components/response.tsx";
- const fileContent = [
- `import ReactMarkdown from 'react-markdown';`,
-    `<ReactMarkdown>{text}</ReactMarkdown>`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const aiElementsChain = chainResult.injected.find((i) => i.targetSkill === "ai-elements");
- expect(aiElementsChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // runtime-cache chainTo rules
- // -------------------------------------------------------------------------
-
- test("runtime-cache file with @vercel/kv chains to vercel-storage", () => {
- const filePath = "/project/lib/cache.ts";
- const fileContent = [
- `import { unstable_cache } from 'next/cache';`,
- `import { kv } from '@vercel/kv';`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const storageChain = chainResult.injected.find((i) => i.targetSkill === "vercel-storage");
- expect(storageChain).toBeDefined();
- });
-
- test("runtime-cache file with ioredis chains to vercel-storage", () => {
- const filePath = "/project/lib/redis.ts";
- const fileContent = [
- `import Redis from 'ioredis';`,
- `const redis = new Redis();`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const storageChain = chainResult.injected.find((i) => i.targetSkill === "vercel-storage");
- expect(storageChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // vercel-sandbox chainTo rules
- // -------------------------------------------------------------------------
-
- test("vercel-sandbox file with vm2 import chains to vercel-sandbox", () => {
- const filePath = "/project/lib/executor.ts";
- const fileContent = [
- `import { VM } from 'vm2';`,
- `const vm = new VM();`,
- `vm.run(userCode);`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const sandboxChain = chainResult.injected.find((i) => i.targetSkill === "vercel-sandbox");
- expect(sandboxChain).toBeDefined();
- });
-
- test("vercel-sandbox file with child_process exec chains to ai-sdk", () => {
- const filePath = "/project/lib/executor.ts";
- const fileContent = [
- `import { exec } from 'child_process';`,
- `exec(command, { shell: true }, callback);`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const aiSdkChain = chainResult.injected.find((i) => i.targetSkill === "ai-sdk");
- expect(aiSdkChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // ai-gateway chainTo rules
- // -------------------------------------------------------------------------
-
- test("ai-gateway file with provider API key chains to ai-sdk", () => {
- const filePath = "/project/lib/ai.ts";
- const fileContent = [
- `import { gateway } from 'ai';`,
- `const key = process.env.ANTHROPIC_API_KEY;`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const aiSdkChain = chainResult.injected.find((i) => i.targetSkill === "ai-sdk");
- expect(aiSdkChain).toBeDefined();
- });
-
- test("ai-gateway file with cost tracking tags chains to observability (skipIfFileContains)", () => {
- const filePath = "/project/lib/ai.ts";
- const fileContent = [
- `import { gateway } from 'ai';`,
- `const model = gateway({ tags: ['production'], user: userId });`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const obsChain = chainResult.injected.find((i) => i.targetSkill === "observability");
- expect(obsChain).toBeDefined();
- });
-
- test("ai-gateway observability chain is skipped when @vercel/analytics present", () => {
- const filePath = "/project/lib/ai.ts";
- const fileContent = [
- `import { gateway } from 'ai';`,
- `import { track } from '@vercel/analytics';`,
- `const model = gateway({ tags: ['production'], user: userId });`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const obsChain = chainResult.injected.find((i) => i.targetSkill === "observability");
- expect(obsChain).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // vercel-flags chainTo rules
- // -------------------------------------------------------------------------
-
- test("vercel-flags file with LaunchDarkly SDK chains to vercel-flags", () => {
- const filePath = "/project/lib/flags.ts";
- const fileContent = `import LaunchDarkly from 'launchdarkly-node-server-sdk';\n`;
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const flagsChain = chainResult.injected.find((i) => i.targetSkill === "vercel-flags");
- expect(flagsChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // satori chainTo rules
- // -------------------------------------------------------------------------
-
- test("satori file with canvas import chains to vercel-functions", () => {
- const filePath = "/project/lib/og.ts";
- const fileContent = [
- `import { createCanvas } from 'canvas';`,
- `const canvas = createCanvas(1200, 630);`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const functionsChain = chainResult.injected.find((i) => i.targetSkill === "vercel-functions");
- expect(functionsChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // vercel-firewall chainTo rules
- // -------------------------------------------------------------------------
-
- test("vercel-firewall file with manual IP blocking chains to routing-middleware", () => {
- const filePath = "/project/middleware.ts";
- const fileContent = [
- `const ip = req.ip;`,
- `const denyList = ['1.2.3.4'];`,
- `if (denyList.includes(ip)) return new Response('Blocked', { status: 403 });`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const routingChain = chainResult.injected.find((i) => i.targetSkill === "routing-middleware");
- expect(routingChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // nextjs additional chainTo rules
- // -------------------------------------------------------------------------
-
- test("nextjs file with raw AI fetch URL chains to ai-gateway", () => {
- const filePath = "/project/app/api/chat/route.ts";
- const fileContent = [
- `export async function POST(req: Request) {`,
- ` const res = await fetch('https://api.openai.com/v1/chat/completions', {`,
- ` headers: { Authorization: 'Bearer ' + key },`,
- ` });`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const gatewayChain = chainResult.injected.find((i) => i.targetSkill === "ai-gateway");
- expect(gatewayChain).toBeDefined();
- });
-
- test("nextjs raw AI fetch chain is skipped when ai-sdk already imported (skipIfFileContains)", () => {
- const filePath = "/project/app/api/chat/route.ts";
- const fileContent = [
- `import { generateText } from 'ai';`,
- `// legacy: fetch('https://api.openai.com/v1/...')`,
- `const result = await generateText({ model: 'openai/gpt-5.4' });`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const rawFetchChain = chainResult.injected.find(
- (i) => i.targetSkill === "ai-gateway" && i.message?.includes("Raw AI provider fetch"),
- );
- expect(rawFetchChain).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // turborepo chainTo rules
- // -------------------------------------------------------------------------
-
- test("turborepo file with @vercel/postgres import chains to vercel-storage", () => {
- const filePath = "/project/packages/db/index.ts";
- const fileContent = `import { sql } from '@vercel/postgres';\n`;
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const storageChain = chainResult.injected.find((i) => i.targetSkill === "vercel-storage");
- expect(storageChain).toBeDefined();
- });
-
- // -------------------------------------------------------------------------
- // Iteration 2: under-covered cross-skill chain rules
- // -------------------------------------------------------------------------
-
- test("vercel-storage file with @vercel/postgres chains to vercel-storage with migration message", () => {
- const filePath = "/project/lib/db.ts";
- const fileContent = `import { sql } from '@vercel/postgres';\nexport const getUsers = () => sql\`SELECT * FROM users\`;\n`;
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const storageChain = chainResult.injected.find(
- (i) => i.targetSkill === "vercel-storage" && i.message?.includes("sunset"),
- );
- expect(storageChain).toBeDefined();
- });
-
- test("vercel-storage @vercel/postgres chain skipped when @neondatabase present", () => {
- const filePath = "/project/lib/db.ts";
- const fileContent = [
- `import { neon } from '@neondatabase/serverless';`,
- `// migrated from @vercel/postgres`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const storageChain = chainResult.injected.find(
- (i) => i.targetSkill === "vercel-storage" && i.message?.includes("sunset"),
- );
- expect(storageChain).toBeUndefined();
- });
-
- test("auth file with bcrypt import chains to auth with managed-auth message", () => {
- const filePath = "/project/lib/auth.ts";
- const fileContent = [
- `import bcrypt from 'bcrypt';`,
- `export async function hashPassword(pwd: string) {`,
- ` return bcrypt.hash(pwd, 10);`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const authChain = chainResult.injected.find(
- (i) => i.targetSkill === "auth" && i.message?.includes("managed"),
- );
- expect(authChain).toBeDefined();
- });
-
- test("auth file with argon2 import chains to auth with managed-auth message", () => {
- const filePath = "/project/lib/auth.ts";
- const fileContent = `import argon2 from 'argon2';\nconst hash = await argon2.hash(password);\n`;
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const authChain = chainResult.injected.find(
- (i) => i.targetSkill === "auth" && i.message?.includes("managed"),
- );
- expect(authChain).toBeDefined();
- });
-
- test("auth bcrypt chain skipped when @clerk present (skipIfFileContains)", () => {
- const filePath = "/project/lib/auth.ts";
- const fileContent = [
- `import { clerkClient } from '@clerk/nextjs/server';`,
- `// legacy: import bcrypt from 'bcrypt';`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const authChain = chainResult.injected.find(
- (i) => i.targetSkill === "auth" && i.message?.includes("bcrypt"),
- );
- expect(authChain).toBeUndefined();
- });
-
- test("payments file with paypal import chains to payments with Stripe-marketplace message", () => {
- const filePath = "/project/lib/checkout.ts";
- const fileContent = [
- `import paypal from '@paypal/checkout-server-sdk';`,
- `const client = new paypal.core.PayPalHttpClient(environment);`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const paymentsChain = chainResult.injected.find(
- (i) => i.targetSkill === "payments" && i.message?.includes("Stripe"),
- );
- expect(paymentsChain).toBeDefined();
- });
-
- test("payments file with braintree import chains to payments with Stripe-marketplace message", () => {
- const filePath = "/project/lib/payments.ts";
- const fileContent = `import braintree from 'braintree';\nconst gateway = new braintree.BraintreeGateway({});\n`;
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const paymentsChain = chainResult.injected.find(
- (i) => i.targetSkill === "payments" && i.message?.includes("Stripe"),
- );
- expect(paymentsChain).toBeDefined();
- });
-
- test("chat-sdk file with @slack/web-api import chains to chat-sdk", () => {
- const filePath = "/project/lib/slack-bot.ts";
- const fileContent = [
- `import { WebClient } from '@slack/web-api';`,
- `const web = new WebClient(process.env.SLACK_BOT_TOKEN);`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const chatSdkChain = chainResult.injected.find((i) => i.targetSkill === "chat-sdk");
- expect(chatSdkChain).toBeDefined();
- });
-
- test("observability file with winston import chains to observability with Vercel-native message", () => {
- const filePath = "/project/lib/logger.ts";
- const fileContent = [
- `import winston from 'winston';`,
- `export const logger = winston.createLogger({`,
- ` level: 'info',`,
- ` transports: [new winston.transports.Console()],`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const obsChain = chainResult.injected.find(
- (i) => i.targetSkill === "observability" && i.message?.includes("Vercel"),
- );
- expect(obsChain).toBeDefined();
- });
-
- test("observability file with pino import chains to observability", () => {
- const filePath = "/project/lib/logger.ts";
- const fileContent = `import pino from 'pino';\nconst logger = pino();\n`;
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const obsChain = chainResult.injected.find((i) => i.targetSkill === "observability");
- expect(obsChain).toBeDefined();
- });
-
- test("observability winston chain skipped when @opentelemetry present", () => {
- const filePath = "/project/lib/logger.ts";
- const fileContent = [
- `import { trace } from '@opentelemetry/api';`,
- `import winston from 'winston';`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const obsChain = chainResult.injected.find(
- (i) => i.targetSkill === "observability" && i.message?.includes("winston"),
- );
- expect(obsChain).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // Negative tests — files that should NOT trigger chains
- // -------------------------------------------------------------------------
-
- test("clean Next.js server component does not trigger any chains", () => {
- const filePath = "/project/app/page.tsx";
- const fileContent = [
- `export default function Page() {`,
-      `  return <div>Hello World</div>;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- expect(chainResult.injected.length).toBe(0);
- });
-
- test("clean AI SDK usage with gateway does not trigger provider chains", () => {
- const filePath = "/project/app/api/chat/route.ts";
- const fileContent = [
- `import { streamText } from 'ai';`,
- `const result = streamText({`,
- ` model: 'openai/gpt-5.4',`,
- ` prompt: 'Hello!',`,
- `});`,
- `return result.toUIMessageStreamResponse();`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // No deprecated patterns, so no chains for outdated API keys or direct providers
- const providerKeyChain = chainResult.injected.find(
- (i) => i.message?.includes("API key") || i.message?.includes("Provider-specific"),
- );
- expect(providerKeyChain).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // ncc chainTo rules
- // -------------------------------------------------------------------------
-
- test("ncc file with serverless bundle chains to vercel-functions", () => {
- const filePath = "/project/scripts/build.sh";
- const fileContent = [
- `#!/bin/bash`,
- `ncc build api/handler.ts -o dist`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const functionsChain = chainResult.injected.find((i) => i.targetSkill === "vercel-functions");
- if (matched.includes("ncc")) {
- expect(functionsChain).toBeDefined();
- expect(functionsChain!.message).toContain("serverless");
- }
- });
-
- test("ncc serverless chain is skipped when vercel.json present (skipIfFileContains)", () => {
- const filePath = "/project/scripts/build.sh";
- const fileContent = [
- `#!/bin/bash`,
- `ncc build api/serverless.ts -o dist`,
- `# vercel.json already configured`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: 'vercel\.json' should suppress
- const functionsChainFromNcc = chainResult.injected.find(
- (i) => i.sourceSkill === "ncc" && i.targetSkill === "vercel-functions" && i.message?.includes("serverless"),
- );
- expect(functionsChainFromNcc).toBeUndefined();
- });
-
- test("ncc build chains to deployments-cicd", () => {
- const filePath = "/project/build.ts";
- const fileContent = [
- `import ncc from '@vercel/ncc';`,
- `const { code } = await ncc('./src/index.ts');`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const cicdChain = chainResult.injected.find((i) => i.targetSkill === "deployments-cicd");
- if (matched.includes("ncc")) {
- expect(cicdChain).toBeDefined();
- expect(cicdChain!.message).toContain("deploy");
- }
- });
-
- // -------------------------------------------------------------------------
- // cms chainTo rules
- // -------------------------------------------------------------------------
-
- test("cms file with getStaticProps chains to nextjs", () => {
- const filePath = "/project/pages/blog/[slug].tsx";
- const fileContent = [
- `import { createClient } from '@sanity/client';`,
- ``,
- `export async function getStaticProps({ params }) {`,
- ` const post = await client.fetch('*[slug.current == $slug]', params);`,
- ` return { props: { post }, revalidate: 60 };`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("cms")) {
- const nextjsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "cms" && i.targetSkill === "nextjs",
- );
- expect(nextjsChain).toBeDefined();
- expect(nextjsChain!.message).toContain("Pages Router");
- }
- });
-
- test("cms getStaticProps chain is skipped when App Router patterns present (skipIfFileContains)", () => {
- const filePath = "/project/app/blog/[slug]/page.tsx";
- const fileContent = [
- `import { createClient } from '@sanity/client';`,
- ``,
- `export function generateStaticParams() {`,
- ` return [{ slug: 'hello' }];`,
- `}`,
- ``,
- `// Legacy comment: getStaticProps was here`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: 'generateStaticParams' should suppress
- const pagesRouterChain = chainResult.injected.find(
- (i) => i.sourceSkill === "cms" && i.targetSkill === "nextjs" && i.message?.includes("Pages Router"),
- );
- expect(pagesRouterChain).toBeUndefined();
- });
-
- test("cms file with revalidatePath chains to runtime-cache", () => {
- const filePath = "/project/app/api/revalidate/route.ts";
- const fileContent = [
- `import { createClient } from 'contentful';`,
- `import { revalidatePath } from 'next/cache';`,
- ``,
- `export async function POST(req: Request) {`,
- ` revalidatePath('/blog');`,
- ` return Response.json({ revalidated: true });`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("cms")) {
- const cacheChain = chainResult.injected.find(
- (i) => i.sourceSkill === "cms" && i.targetSkill === "runtime-cache",
- );
- expect(cacheChain).toBeDefined();
- expect(cacheChain!.message).toContain("Revalidation");
- }
- });
-
- // -------------------------------------------------------------------------
- // ai-generation-persistence chainTo rules
- // -------------------------------------------------------------------------
-
- test("ai-generation-persistence file with streamText chains to ai-gateway", () => {
- const filePath = "/project/app/api/generate/route.ts";
- const fileContent = [
- `import { streamText } from 'ai';`,
- ``,
- `export async function POST(req: Request) {`,
- ` const result = streamText({`,
- ` model: 'openai/gpt-5.4',`,
- ` prompt: req.body,`,
- ` });`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("ai-generation-persistence")) {
- const gatewayChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-generation-persistence" && i.targetSkill === "ai-gateway",
- );
- expect(gatewayChain).toBeDefined();
- expect(gatewayChain!.message).toContain("cost");
- }
- });
-
- test("ai-generation-persistence gateway chain is skipped when @ai-sdk/gateway present (skipIfFileContains)", () => {
- const filePath = "/project/app/api/generate/route.ts";
- const fileContent = [
- `import { streamText, gateway } from 'ai';`,
- ``,
- `const result = streamText({`,
- ` model: gateway('openai/gpt-5.4', { tags: ['prod'] }),`,
- ` prompt: 'Hello',`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: 'gateway(' should suppress
- const gatewayChainFromPersistence = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-generation-persistence" && i.targetSkill === "ai-gateway",
- );
- expect(gatewayChainFromPersistence).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // observability chainTo rules
- // -------------------------------------------------------------------------
-
- test("observability file with console.log error handling chains to vercel-functions", () => {
- const filePath = "/project/app/api/data/route.ts";
- const fileContent = [
- `export async function GET(req: Request) {`,
- ` try {`,
- ` const data = await fetchData();`,
- ` return Response.json(data);`,
- ` } catch (err) {`,
- ` console.log("error", err);`,
- ` return Response.json({ error: 'failed' }, { status: 500 });`,
- ` }`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("observability")) {
- const functionsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "observability" && i.targetSkill === "vercel-functions",
- );
- expect(functionsChain).toBeDefined();
- expect(functionsChain!.message).toContain("Console.log");
- }
- });
-
- test("observability console.log chain is skipped when Sentry present (skipIfFileContains)", () => {
- const filePath = "/project/app/api/data/route.ts";
- const fileContent = [
- `import * as Sentry from '@sentry/nextjs';`,
- ``,
- `try { await fetchData(); } catch (err) {`,
- ` console.log("error", err);`,
- ` Sentry.captureException(err);`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: 'captureException|@sentry/' should suppress
- const consoleChain = chainResult.injected.find(
- (i) => i.sourceSkill === "observability" && i.targetSkill === "vercel-functions" && i.message?.includes("Console.log"),
- );
- expect(consoleChain).toBeUndefined();
- });
-
- test("observability file with Sentry SDK chains to nextjs", () => {
- const filePath = "/project/sentry.server.config.ts";
- const fileContent = [
- `import * as Sentry from '@sentry/nextjs';`,
- ``,
- `Sentry.init({`,
- ` dsn: process.env.SENTRY_DSN,`,
- ` tracesSampleRate: 1.0,`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("observability")) {
- const nextjsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "observability" && i.targetSkill === "nextjs",
- );
- expect(nextjsChain).toBeDefined();
- expect(nextjsChain!.message).toContain("Sentry");
- }
- });
-
- // -------------------------------------------------------------------------
- // sign-in-with-vercel chainTo rules
- // -------------------------------------------------------------------------
-
- test("sign-in-with-vercel file with NextAuth chains to auth", () => {
- const filePath = "/project/app/api/auth/route.ts";
- const fileContent = [
- `import NextAuth from 'next-auth';`,
- `import { vercelProvider } from './vercel-provider';`,
- ``,
- `const handler = NextAuth({ providers: [vercelProvider] });`,
- `export { handler as GET, handler as POST };`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("sign-in-with-vercel")) {
- const authChain = chainResult.injected.find(
- (i) => i.sourceSkill === "sign-in-with-vercel" && i.targetSkill === "auth",
- );
- expect(authChain).toBeDefined();
- expect(authChain!.message).toContain("NextAuth");
- }
- });
-
- test("sign-in-with-vercel NextAuth chain is skipped when Clerk present (skipIfFileContains)", () => {
- const filePath = "/project/app/api/auth/route.ts";
- const fileContent = [
- `import NextAuth from 'next-auth';`,
- `import { clerkMiddleware } from '@clerk/nextjs/server';`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: 'clerkMiddleware|@clerk/' should suppress
- const authChainFromSiVercel = chainResult.injected.find(
- (i) => i.sourceSkill === "sign-in-with-vercel" && i.targetSkill === "auth" && i.message?.includes("NextAuth"),
- );
- expect(authChainFromSiVercel).toBeUndefined();
- });
-
- test("sign-in-with-vercel VERCEL_CLIENT_ID chains to env-vars", () => {
- const filePath = "/project/lib/vercel-auth.ts";
- const fileContent = [
- `const clientId = process.env.VERCEL_CLIENT_ID;`,
- `const redirectUri = 'https://app.example.com/callback';`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("sign-in-with-vercel")) {
- const envChain = chainResult.injected.find(
- (i) => i.sourceSkill === "sign-in-with-vercel" && i.targetSkill === "env-vars",
- );
- expect(envChain).toBeDefined();
- expect(envChain!.message).toContain("environment variable");
- }
- });
-
- // -------------------------------------------------------------------------
- // json-render chainTo rules
- // -------------------------------------------------------------------------
-
- test("json-render file with message.content chains to ai-sdk", () => {
- const filePath = "/project/components/chat-message.tsx";
- const fileContent = [
- `export function ChatMessage({ message }) {`,
-      `  return <div>{message.content}</div>;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("json-render")) {
- const aiSdkChain = chainResult.injected.find(
- (i) => i.sourceSkill === "json-render" && i.targetSkill === "ai-sdk",
- );
- expect(aiSdkChain).toBeDefined();
- expect(aiSdkChain!.message).toContain("v5");
- }
- });
-
- test("json-render file with ReactMarkdown chains to ai-elements", () => {
- const filePath = "/project/components/ai-message.tsx";
- const fileContent = [
- `import ReactMarkdown from 'react-markdown';`,
- ``,
- `export function AIMessage({ text }: { text: string }) {`,
-      `  return <ReactMarkdown>{text}</ReactMarkdown>;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("json-render")) {
- const aiElementsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "json-render" && i.targetSkill === "ai-elements",
- );
- expect(aiElementsChain).toBeDefined();
- expect(aiElementsChain!.message).toContain("markdown");
- }
- });
-
- // -------------------------------------------------------------------------
- // deployments-cicd chainTo rules
- // -------------------------------------------------------------------------
-
- test("deployments-cicd file with node-cron chains to cron-jobs", () => {
- const filePath = "/project/scripts/deploy.ts";
- const fileContent = [
- `import cron from 'node-cron';`,
- ``,
- `cron.schedule('0 2 * * *', async () => {`,
- ` await deployToProduction();`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("deployments-cicd")) {
- const cronChain = chainResult.injected.find(
- (i) => i.sourceSkill === "deployments-cicd" && i.targetSkill === "cron-jobs",
- );
- expect(cronChain).toBeDefined();
- expect(cronChain!.message).toContain("cron");
- }
- });
-
- // -------------------------------------------------------------------------
- // micro chainTo rules
- // -------------------------------------------------------------------------
-
- test("micro file with micro import chains to vercel-functions", () => {
- const filePath = "/project/api/hello.ts";
- const fileContent = [
- `import { send, json } from 'micro';`,
- ``,
- `export default async (req, res) => {`,
- ` const body = await json(req);`,
- ` send(res, 200, { hello: body.name });`,
- `};`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("micro")) {
- const functionsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "micro" && i.targetSkill === "vercel-functions",
- );
- expect(functionsChain).toBeDefined();
- expect(functionsChain!.message).toContain("micro");
- }
- });
-
- // -------------------------------------------------------------------------
- // bootstrap chainTo rules
- // -------------------------------------------------------------------------
-
- test("bootstrap file with @vercel/postgres chains to vercel-storage", () => {
- const filePath = "/project/lib/db.ts";
- const fileContent = [
- `import { sql } from '@vercel/postgres';`,
- `const POSTGRES_URL = process.env.POSTGRES_URL;`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("bootstrap")) {
- const storageChain = chainResult.injected.find(
- (i) => i.sourceSkill === "bootstrap" && i.targetSkill === "vercel-storage",
- );
- expect(storageChain).toBeDefined();
- expect(storageChain!.message).toContain("sunset");
- }
- });
-
- // -------------------------------------------------------------------------
- // next-forge chainTo rules
- // -------------------------------------------------------------------------
-
- test("next-forge file with middleware export chains to routing-middleware", () => {
- const filePath = "/project/apps/app/middleware.ts";
- const fileContent = [
- `import { clerkMiddleware } from '@clerk/nextjs/server';`,
- ``,
- `export default function middleware(req) {`,
- ` return clerkMiddleware()(req);`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("next-forge")) {
- const routingChain = chainResult.injected.find(
- (i) => i.sourceSkill === "next-forge" && i.targetSkill === "routing-middleware",
- );
- expect(routingChain).toBeDefined();
- expect(routingChain!.message).toContain("middleware");
- }
- });
-
- // -------------------------------------------------------------------------
- // Chain cap with >2 newly added rules (DEFAULT_CHAIN_CAP=2 enforcement)
- // -------------------------------------------------------------------------
-
-  test("chain cap limits injection to 2 when >2 new rules match (default cap)", async () => {
- // Simulate a file that triggers chains from 3 different source skills
- // using realistic patterns — we use unit-level test for precise control
- const mod = await import("../hooks/posttooluse-validate.mjs");
- const rci = mod.runChainInjection;
-
- const chainMap = new Map([
- ["source-1", [{ pattern: "MATCH_1", targetSkill: "micro" }]],
- ["source-2", [{ pattern: "MATCH_2", targetSkill: "swr" }]],
- ["source-3", [{ pattern: "MATCH_3", targetSkill: "cron-jobs" }]],
- ]);
-
- const fileContent = `MATCH_1;\nMATCH_2;\nMATCH_3;\n`;
- // No VERCEL_PLUGIN_CHAIN_CAP set — default is 2
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = rci(
- fileContent,
- ["source-1", "source-2", "source-3"],
- chainMap,
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(2);
- });
-
- // -------------------------------------------------------------------------
- // Dedup prevents re-injection of already-seen chained skills (real-world)
- // -------------------------------------------------------------------------
-
- test("dedup prevents re-injection when target skill already seen (real-world scenario)", () => {
- const filePath = "/project/lib/db.ts";
- const fileContent = [
- `import { sql } from '@vercel/postgres';`,
- `export const getUsers = () => sql\`SELECT * FROM users\`;`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
-
- // Simulate ai-gateway and nextjs already seen — common scenario in a session
- const envWithSeen: any = { VERCEL_PLUGIN_SEEN_SKILLS: "nextjs,vercel-storage,ai-gateway" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, envWithSeen);
-
- // nextjs is a common target from vercel-storage's @vercel/postgres chain — should be suppressed
- const nextjsChain = chainResult.injected.find(
- (i) => i.targetSkill === "nextjs",
- );
- expect(nextjsChain).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // Phase 1: vercel-cli chainTo rules
- // -------------------------------------------------------------------------
-
- test("vercel-cli vercel.json with crons config chains to cron-jobs", () => {
- const filePath = "/project/vercel.json";
- const fileContent = JSON.stringify({
- "crons": [{ "path": "/api/cron", "schedule": "0 * * * *" }],
- }, null, 2);
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("vercel-cli")) {
- const cronChain = chainResult.injected.find(
- (i) => i.sourceSkill === "vercel-cli" && i.targetSkill === "cron-jobs",
- );
- expect(cronChain).toBeDefined();
- expect(cronChain!.message).toContain("Cron");
- }
- });
-
- test("vercel-cli vercel.json with functions config chains to vercel-functions", () => {
- const filePath = "/project/vercel.json";
- const fileContent = JSON.stringify({
- "functions": { "api/**/*.ts": { "maxDuration": 60 } },
- }, null, 2);
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("vercel-cli")) {
- const functionsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "vercel-cli" && i.targetSkill === "vercel-functions",
- );
- expect(functionsChain).toBeDefined();
- expect(functionsChain!.message).toContain("Functions");
- }
- });
-
- test("vercel-cli vercel.json with redirects chains to routing-middleware", () => {
- const filePath = "/project/vercel.json";
- const fileContent = JSON.stringify({
- "redirects": [{ "source": "/old", "destination": "/new", "permanent": true }],
- }, null, 2);
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("vercel-cli")) {
- const routingChain = chainResult.injected.find(
- (i) => i.sourceSkill === "vercel-cli" && i.targetSkill === "routing-middleware",
- );
- expect(routingChain).toBeDefined();
- expect(routingChain!.message).toContain("Routing");
- }
- });
-
- test("vercel-cli functions chain is skipped when crons also present (skipIfFileContains)", () => {
- const filePath = "/project/vercel.json";
- const fileContent = JSON.stringify({
- "crons": [{ "path": "/api/cron", "schedule": "0 * * * *" }],
- "functions": { "api/**/*.ts": { "maxDuration": 60 } },
- }, null, 2);
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: '"crons"\s*:' should suppress the functions chain
- const functionsChainFromCli = chainResult.injected.find(
- (i) => i.sourceSkill === "vercel-cli" && i.targetSkill === "vercel-functions",
- );
- expect(functionsChainFromCli).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // Phase 1: marketplace chainTo rules
- // -------------------------------------------------------------------------
-
- test("marketplace file with Neon env var chains to vercel-storage", () => {
- const filePath = "/project/lib/db.ts";
- const fileContent = [
- `const url = process.env.NEON_DATABASE_URL;`,
- `const pool = new Pool({ connectionString: url });`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("marketplace")) {
- const storageChain = chainResult.injected.find(
- (i) => i.sourceSkill === "marketplace" && i.targetSkill === "vercel-storage",
- );
- expect(storageChain).toBeDefined();
- expect(storageChain!.message).toContain("Database");
- }
- });
-
- test("marketplace file with Clerk env var chains to auth", () => {
- const filePath = "/project/.env.local";
- const fileContent = [
- `CLERK_SECRET_KEY=sk_test_abc123`,
- `NEXT_PUBLIC_CLERK_PUBLISHABLE_KEY=pk_test_xyz`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("marketplace")) {
- const authChain = chainResult.injected.find(
- (i) => i.sourceSkill === "marketplace" && i.targetSkill === "auth",
- );
- expect(authChain).toBeDefined();
- expect(authChain!.message).toContain("Clerk");
- }
- });
-
- test("marketplace file with Sanity env var chains to cms", () => {
- const filePath = "/project/.env.local";
- const fileContent = [
- `SANITY_PROJECT_ID=abc123`,
- `SANITY_DATASET=production`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("marketplace")) {
- const cmsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "marketplace" && i.targetSkill === "cms",
- );
- expect(cmsChain).toBeDefined();
- expect(cmsChain!.message).toContain("CMS");
- }
- });
-
- // -------------------------------------------------------------------------
- // Phase 1: v0-dev chainTo rules
- // -------------------------------------------------------------------------
-
- test("v0-dev file with shadcn component import chains to shadcn", () => {
- const filePath = "/project/components/generated.tsx";
- const fileContent = [
- `import { Button } from '@/components/ui/button';`,
- `import { Card } from '@/components/ui/card';`,
- ``,
- `export function GeneratedUI() {`,
- ` return ;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("v0-dev")) {
- const shadcnChain = chainResult.injected.find(
- (i) => i.sourceSkill === "v0-dev" && i.targetSkill === "shadcn",
- );
- expect(shadcnChain).toBeDefined();
- expect(shadcnChain!.message).toContain("shadcn");
- }
- });
-
- test("v0-dev file with AI SDK usage chains to ai-sdk", () => {
- const filePath = "/project/components/chat.tsx";
- const fileContent = [
- `import { useChat } from '@ai-sdk/react';`,
- `import { Button } from '@/components/ui/button';`,
- ``,
- `export function Chat() {`,
- ` const { messages } = useChat({ transport: new DefaultChatTransport({ api: '/api/chat' }) });`,
- ` return <div>{messages.map(m => <div key={m.id}>{m.content}</div>)}</div>;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("v0-dev")) {
- const aiSdkChain = chainResult.injected.find(
- (i) => i.sourceSkill === "v0-dev" && i.targetSkill === "ai-sdk",
- );
- expect(aiSdkChain).toBeDefined();
- expect(aiSdkChain!.message).toContain("AI SDK");
- }
- });
-
- test("v0-dev ai-sdk chain is skipped when modern patterns present (skipIfFileContains)", () => {
- const filePath = "/project/components/chat.tsx";
- const fileContent = [
- `import { useChat } from '@ai-sdk/react';`,
- `import { convertToModelMessages } from 'ai';`,
- `import { Button } from '@/components/ui/button';`,
- ``,
- `// File already using v6 patterns`,
- `const result = streamText({ model: 'openai/gpt-5.4' });`,
- `return result.toUIMessageStreamResponse();`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: 'convertToModelMessages|toUIMessageStreamResponse' should suppress
- const aiSdkChainFromV0 = chainResult.injected.find(
- (i) => i.sourceSkill === "v0-dev" && i.targetSkill === "ai-sdk",
- );
- expect(aiSdkChainFromV0).toBeUndefined();
- });
-
- test("v0-dev file with next/image import chains to nextjs", () => {
- const filePath = "/project/components/hero.tsx";
- const fileContent = [
- `import Image from 'next/image';`,
- `import { Button } from '@/components/ui/button';`,
- ``,
- `export function Hero() {`,
- ` return ;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("v0-dev")) {
- const nextjsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "v0-dev" && i.targetSkill === "nextjs",
- );
- expect(nextjsChain).toBeDefined();
- expect(nextjsChain!.message).toContain("Next.js");
- }
- });
-
- // -------------------------------------------------------------------------
- // Phase 1: investigation-mode chainTo rules
- // -------------------------------------------------------------------------
-
- test("investigation-mode file with workflow imports chains to workflow", () => {
- const filePath = "/project/workflows/review.ts";
- const fileContent = [
- `import { createWorkflow } from 'workflow';`,
- ``,
- `const wf = createWorkflow({ id: 'review' });`,
- `// Debugging: workflow stuck at step 3`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("investigation-mode")) {
- const workflowChain = chainResult.injected.find(
- (i) => i.sourceSkill === "investigation-mode" && i.targetSkill === "workflow",
- );
- expect(workflowChain).toBeDefined();
- expect(workflowChain!.message).toContain("Workflow");
- }
- });
-
- test("investigation-mode file with VERCEL_URL chains to deployments-cicd", () => {
- const filePath = "/project/lib/config.ts";
- const fileContent = [
- `const baseUrl = process.env.VERCEL_URL`,
- ` ? \`https://\${process.env.VERCEL_URL}\``,
- ` : 'http://localhost:3000';`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("investigation-mode")) {
- const deployChain = chainResult.injected.find(
- (i) => i.sourceSkill === "investigation-mode" && i.targetSkill === "deployments-cicd",
- );
- expect(deployChain).toBeDefined();
- expect(deployChain!.message).toContain("Deployment");
- }
- });
-
- test("investigation-mode deployment chain is skipped when vercel inspect present (skipIfFileContains)", () => {
- const filePath = "/project/scripts/debug.sh";
- const fileContent = [
- `#!/bin/bash`,
- `echo "Checking VERCEL_URL..."`,
- `vercel inspect $DEPLOYMENT_ID`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: 'vercel\s+inspect|vercel\s+logs' should suppress
- const deployChainFromInvestigation = chainResult.injected.find(
- (i) => i.sourceSkill === "investigation-mode" && i.targetSkill === "deployments-cicd",
- );
- expect(deployChainFromInvestigation).toBeUndefined();
- });
-
- test("investigation-mode file with @vercel/analytics chains to observability", () => {
- const filePath = "/project/lib/analytics.ts";
- const fileContent = [
- `import { track } from '@vercel/analytics';`,
- ``,
- `export function trackEvent(name: string, data: Record) {`,
- ` track(name, data);`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("investigation-mode")) {
- const obsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "investigation-mode" && i.targetSkill === "observability",
- );
- expect(obsChain).toBeDefined();
- expect(obsChain!.message).toContain("Observability");
- }
- });
-
- // -------------------------------------------------------------------------
- // Phase 1: verification chainTo rules
- // -------------------------------------------------------------------------
-
- test("verification file with process.env references chains to env-vars", () => {
- const filePath = "/project/app/api/data/route.ts";
- const fileContent = [
- `export async function GET() {`,
- ` const apiKey = process.env.API_KEY;`,
- ` const dbUrl = process.env.DATABASE_URL;`,
- ` return Response.json({ ok: true });`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("verification")) {
- const envChain = chainResult.injected.find(
- (i) => i.sourceSkill === "verification" && i.targetSkill === "env-vars",
- );
- expect(envChain).toBeDefined();
- expect(envChain!.message).toContain("environment variable");
- }
- });
-
- test("verification env-vars chain is skipped when .env.local referenced (skipIfFileContains)", () => {
- const filePath = "/project/app/api/data/route.ts";
- const fileContent = [
- `// Config pulled from .env.local via vercel env pull`,
- `const apiKey = process.env.API_KEY;`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: 'vercel\s+env\s+pull|\.env\.local' should suppress
- const envChainFromVerification = chainResult.injected.find(
- (i) => i.sourceSkill === "verification" && i.targetSkill === "env-vars",
- );
- expect(envChainFromVerification).toBeUndefined();
- });
-
- test("verification file with middleware.ts chains to routing-middleware", () => {
- const filePath = "/project/middleware.ts";
- const fileContent = [
- `import { NextResponse } from 'next/server';`,
- ``,
- `export function middleware(req) {`,
- ` return NextResponse.next();`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("verification")) {
- const routingChain = chainResult.injected.find(
- (i) => i.sourceSkill === "verification" && i.targetSkill === "routing-middleware",
- );
- expect(routingChain).toBeDefined();
- expect(routingChain!.message).toContain("Middleware");
- }
- });
-
- test("verification file with streamText chains to ai-sdk", () => {
- const filePath = "/project/app/api/chat/route.ts";
- const fileContent = [
- `import { streamText } from 'ai';`,
- ``,
- `export async function POST(req: Request) {`,
- ` const result = streamText({`,
- ` model: 'openai/gpt-5.4',`,
- ` prompt: 'Hello!',`,
- ` });`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("verification")) {
- const aiSdkChain = chainResult.injected.find(
- (i) => i.sourceSkill === "verification" && i.targetSkill === "ai-sdk",
- );
- expect(aiSdkChain).toBeDefined();
- expect(aiSdkChain!.message).toContain("AI SDK");
- }
- });
-
- test("verification ai-sdk chain is skipped when modern patterns present (skipIfFileContains)", () => {
- const filePath = "/project/app/api/chat/route.ts";
- const fileContent = [
- `import { streamText } from 'ai';`,
- `import { DefaultChatTransport } from '@ai-sdk/react';`,
- ``,
- `const result = streamText({ model: 'openai/gpt-5.4' });`,
- `return result.toUIMessageStreamResponse();`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: 'toUIMessageStreamResponse|DefaultChatTransport' should suppress
- const aiSdkChainFromVerification = chainResult.injected.find(
- (i) => i.sourceSkill === "verification" && i.targetSkill === "ai-sdk",
- );
- expect(aiSdkChainFromVerification).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // Phase 2 enrichment: nextjs → shadcn
- // -------------------------------------------------------------------------
-
- test("nextjs file with @/components/ui import chains to shadcn", () => {
- const filePath = "/project/app/dashboard/page.tsx";
- const fileContent = [
- `import { Card } from '@/components/ui/card';`,
- `import { Button } from '@/components/ui/button';`,
- ``,
- `export default function Dashboard() {`,
- ` return ;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("nextjs");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const shadcnChain = chainResult.injected.find(
- (i) => i.sourceSkill === "nextjs" && i.targetSkill === "shadcn",
- );
- expect(shadcnChain).toBeDefined();
- expect(shadcnChain!.message).toContain("shadcn");
- });
-
- test("nextjs shadcn chain is skipped when components.json referenced (skipIfFileContains)", () => {
- const filePath = "/project/app/dashboard/page.tsx";
- const fileContent = [
- `// Project uses shadcn — see components.json`,
- `import { Card } from '@/components/ui/card';`,
- ``,
- `export default function Dashboard() {`,
- ` return Content;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: 'shadcn|components\.json' should suppress
- const shadcnChainFromNextjs = chainResult.injected.find(
- (i) => i.sourceSkill === "nextjs" && i.targetSkill === "shadcn",
- );
- expect(shadcnChainFromNextjs).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // Phase 2 enrichment: next-forge → auth
- // -------------------------------------------------------------------------
-
- test("next-forge file with Clerk patterns chains to auth", () => {
- const filePath = "/project/apps/app/lib/auth.ts";
- const fileContent = [
- `import { clerkMiddleware } from '@clerk/nextjs/server';`,
- `import { auth } from '@clerk/nextjs/server';`,
- ``,
- `export async function getUser() {`,
- ` const { userId } = await auth();`,
- ` return userId;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("next-forge")) {
- const authChain = chainResult.injected.find(
- (i) => i.sourceSkill === "next-forge" && i.targetSkill === "auth",
- );
- expect(authChain).toBeDefined();
- expect(authChain!.message).toContain("Clerk");
- }
- });
-
- test("next-forge auth chain is skipped when @auth0 present (skipIfFileContains)", () => {
- const filePath = "/project/apps/app/lib/auth.ts";
- const fileContent = [
- `import { clerkMiddleware } from '@clerk/nextjs/server';`,
- `import { auth0 } from '@auth0/nextjs-auth0';`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: '@auth0/|@descope/' should suppress
- const authChainFromNextForge = chainResult.injected.find(
- (i) => i.sourceSkill === "next-forge" && i.targetSkill === "auth",
- );
- expect(authChainFromNextForge).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // Phase 2 enrichment: next-forge → payments
- // -------------------------------------------------------------------------
-
- test("next-forge file with Stripe import chains to payments", () => {
- const filePath = "/project/apps/app/lib/stripe.ts";
- const fileContent = [
- `import Stripe from 'stripe';`,
- ``,
- `export const stripe = new Stripe(process.env.STRIPE_SECRET_KEY!);`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("next-forge")) {
- const paymentsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "next-forge" && i.targetSkill === "payments",
- );
- expect(paymentsChain).toBeDefined();
- expect(paymentsChain!.message).toContain("Stripe");
- }
- });
-
- // -------------------------------------------------------------------------
- // Phase 2 enrichment: next-forge → email
- // -------------------------------------------------------------------------
-
- test("next-forge file with Resend import chains to email", () => {
- const filePath = "/project/apps/app/lib/email.ts";
- const fileContent = [
- `import { Resend } from 'resend';`,
- ``,
- `export const resend = new Resend(process.env.RESEND_API_KEY);`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- if (matched.includes("next-forge")) {
- const emailChain = chainResult.injected.find(
- (i) => i.sourceSkill === "next-forge" && i.targetSkill === "email",
- );
- expect(emailChain).toBeDefined();
- expect(emailChain!.message).toContain("Email");
- }
- });
-
- test("file with modern Upstash Redis does not trigger @vercel/kv chain", () => {
- const filePath = "/project/lib/cache.ts";
- const fileContent = [
- `import { Redis } from '@upstash/redis';`,
- `const redis = new Redis({ url: process.env.UPSTASH_REDIS_REST_URL });`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // Should not trigger sunset @vercel/kv chain
- const kvChain = chainResult.injected.find(
- (i) => i.message?.includes("@vercel/kv"),
- );
- expect(kvChain).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // AI SDK v5→v6 migration chain rules (ai-sdk chainTo)
- // -------------------------------------------------------------------------
-
- test("ai-sdk file with generateObject chains to ai-elements (skipIfFileContains Output.object)", () => {
- const filePath = "/project/app/api/extract/route.ts";
- const fileContent = [
- `import { generateObject } from 'ai';`,
- `import { z } from 'zod';`,
- ``,
- `const result = await generateObject({`,
- ` model: 'openai/gpt-5.4',`,
- ` schema: z.object({ name: z.string() }),`,
- ` prompt: 'Extract',`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("ai-sdk");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const aiElementsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-sdk" && i.targetSkill === "ai-elements" && i.message?.includes("Output.object"),
- );
- expect(aiElementsChain).toBeDefined();
- });
-
- test("ai-sdk generateObject chain is skipped when Output.object already present", () => {
- const filePath = "/project/app/api/extract/route.ts";
- const fileContent = [
- `import { generateText, Output } from 'ai';`,
- ``,
- `const result = await generateText({`,
- ` model: 'openai/gpt-5.4',`,
- ` output: Output.object({ schema }),`,
- ` prompt: 'Extract',`,
- `});`,
- `// Legacy reference: generateObject was here`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const genObjChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-sdk" && i.message?.includes("Output.object"),
- );
- expect(genObjChain).toBeUndefined();
- });
-
- test("ai-sdk file with maxSteps chains to ai-elements (skipIfFileContains stepCountIs)", () => {
- const filePath = "/project/app/api/agent/route.ts";
- const fileContent = [
- `import { streamText } from 'ai';`,
- ``,
- `const result = streamText({`,
- ` model: 'openai/gpt-5.4',`,
- ` maxSteps: 5,`,
- ` prompt: 'Plan a trip',`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("ai-sdk");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const maxStepsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-sdk" && i.message?.includes("stepCountIs"),
- );
- expect(maxStepsChain).toBeDefined();
- });
-
- test("ai-sdk maxSteps chain is skipped when stepCountIs already present", () => {
- const filePath = "/project/app/api/agent/route.ts";
- const fileContent = [
- `import { streamText, stepCountIs } from 'ai';`,
- ``,
- `const result = streamText({`,
- ` model: 'openai/gpt-5.4',`,
- ` stopWhen: stepCountIs(5),`,
- ` prompt: 'Plan a trip',`,
- `});`,
- `// Legacy comment: maxSteps: 5`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const maxStepsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-sdk" && i.message?.includes("stepCountIs"),
- );
- expect(maxStepsChain).toBeUndefined();
- });
-
- test("ai-sdk file with toDataStreamResponse chains to ai-elements (skipIfFileContains toUIMessageStreamResponse)", () => {
- const filePath = "/project/app/api/chat/route.ts";
- const fileContent = [
- `import { streamText } from 'ai';`,
- ``,
- `export async function POST(req: Request) {`,
- ` const result = streamText({ model: 'openai/gpt-5.4', prompt: 'Hello' });`,
- ` return result.toDataStreamResponse();`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("ai-sdk");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const tdsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-sdk" && i.targetSkill === "ai-elements" && i.message?.includes("toUIMessageStreamResponse"),
- );
- expect(tdsChain).toBeDefined();
- });
-
- test("ai-sdk toDataStreamResponse chain is skipped when toUIMessageStreamResponse present", () => {
- const filePath = "/project/app/api/chat/route.ts";
- const fileContent = [
- `import { streamText } from 'ai';`,
- `// Migrated from toDataStreamResponse`,
- `return result.toUIMessageStreamResponse();`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const tdsChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-sdk" && i.message?.includes("toUIMessageStreamResponse") && i.message?.includes("v5"),
- );
- expect(tdsChain).toBeUndefined();
- });
-
- test("ai-sdk file with handleSubmit chains to ai-elements (skipIfFileContains sendMessage)", () => {
- const filePath = "/project/components/chat.tsx";
- const fileContent = [
- `'use client';`,
- `import { useChat } from '@ai-sdk/react';`,
- ``,
- `export function Chat() {`,
- ` const { messages, input, handleSubmit } = useChat();`,
-      `  return <form onSubmit={handleSubmit}><input value={input} /></form>;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("ai-sdk");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const handleSubmitChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-sdk" && i.targetSkill === "ai-elements" && i.message?.includes("sendMessage"),
- );
- expect(handleSubmitChain).toBeDefined();
- });
-
- test("ai-sdk handleSubmit chain is skipped when sendMessage already present", () => {
- const filePath = "/project/components/chat.tsx";
- const fileContent = [
- `'use client';`,
- `import { useChat } from '@ai-sdk/react';`,
- `const { sendMessage } = useChat();`,
- `// Legacy: handleSubmit reference in comment`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const handleSubmitChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-sdk" && i.message?.includes("sendMessage") && i.targetSkill === "ai-elements",
- );
- expect(handleSubmitChain).toBeUndefined();
- });
-
- test("ai-sdk file with useChat({ api: }) v5 pattern chains to ai-elements", () => {
- const filePath = "/project/components/chat.tsx";
- const fileContent = [
- `'use client';`,
- `import { useChat } from '@ai-sdk/react';`,
- ``,
- `export function Chat() {`,
- ` const { messages } = useChat({ api: '/api/chat' });`,
-      `  return <div>{messages.map(m => <div key={m.id}>{m.content}</div>)}</div>;`,
- `}`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("ai-sdk");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const useChatV5Chain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-sdk" && i.targetSkill === "ai-elements" && i.message?.includes("DefaultChatTransport"),
- );
- expect(useChatV5Chain).toBeDefined();
- });
-
- test("ai-sdk useChat v5 api chain is skipped when DefaultChatTransport present", () => {
- const filePath = "/project/components/chat.tsx";
- const fileContent = [
- `'use client';`,
- `import { useChat, DefaultChatTransport } from '@ai-sdk/react';`,
- `const { messages } = useChat({ transport: new DefaultChatTransport({ api: '/api/chat' }) });`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const useChatV5Chain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-sdk" && i.message?.includes("DefaultChatTransport") && i.message?.includes("v5"),
- );
- expect(useChatV5Chain).toBeUndefined();
- });
-
- // -------------------------------------------------------------------------
- // AI Gateway model deprecation chain rules (ai-gateway chainTo)
- // -------------------------------------------------------------------------
-
- test("ai-gateway file with gpt-4o model string chains to ai-sdk", () => {
- const filePath = "/project/lib/ai.ts";
- const fileContent = [
- `import { generateText } from 'ai';`,
- ``,
- `const result = await generateText({`,
- ` model: 'openai/gpt-4o',`,
- ` prompt: 'Hello!',`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("ai-gateway");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const gpt4oChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-gateway" && i.message?.includes("gpt-4o"),
- );
- expect(gpt4oChain).toBeDefined();
- expect(gpt4oChain!.message).toContain("gpt-5.4");
- });
-
- test("ai-gateway gpt-4o chain is skipped when gpt-5 already present", () => {
- const filePath = "/project/lib/ai.ts";
- const fileContent = [
- `import { generateText } from 'ai';`,
- `// Migrated from gpt-4o`,
- `const result = await generateText({ model: 'openai/gpt-5.4' });`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const gpt4oChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-gateway" && i.message?.includes("gpt-4o"),
- );
- expect(gpt4oChain).toBeUndefined();
- });
-
- test("ai-gateway file with DALL-E reference chains to ai-sdk", () => {
- const filePath = "/project/app/api/image/route.ts";
- const fileContent = [
- `import { generateText } from 'ai';`,
- ``,
- `const result = await generateText({`,
- ` model: 'openai/dall-e-3',`,
- ` prompt: 'A sunset',`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("ai-gateway");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const dalleChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-gateway" && i.message?.includes("DALL-E"),
- );
- expect(dalleChain).toBeDefined();
- expect(dalleChain!.message).toContain("gemini-3.1-flash-image-preview");
- });
-
- test("ai-gateway DALL-E chain is skipped when gemini-3 present", () => {
- const filePath = "/project/app/api/image/route.ts";
- const fileContent = [
- `import { generateText } from 'ai';`,
- `// Migrated from dall-e-3`,
- `const result = await generateText({ model: 'google/gemini-3.1-flash-image-preview' });`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const dalleChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-gateway" && i.message?.includes("DALL-E"),
- );
- expect(dalleChain).toBeUndefined();
- });
-
- test("ai-gateway file with gemini-2.x model chains to ai-sdk", () => {
- const filePath = "/project/app/api/image/route.ts";
- const fileContent = [
- `import { generateText } from 'ai';`,
- ``,
- `const result = await generateText({`,
- ` model: 'google/gemini-2.0-flash-exp-image-generation',`,
- ` prompt: 'Generate an image',`,
- `});`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- expect(matched).toContain("ai-gateway");
-
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const gemini2Chain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-gateway" && i.message?.includes("Gemini 2.x"),
- );
- expect(gemini2Chain).toBeDefined();
- expect(gemini2Chain!.message).toContain("gemini-3");
- });
-
- test("ai-gateway gemini-2.x chain is skipped when gemini-3 present", () => {
- const filePath = "/project/app/api/image/route.ts";
- const fileContent = [
- `import { generateText } from 'ai';`,
- `// Old: gemini-2.0-flash-exp-image-generation`,
- `const result = await generateText({ model: 'google/gemini-3.1-flash-image-preview' });`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- const gemini2Chain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-gateway" && i.message?.includes("Gemini 2.x"),
- );
- expect(gemini2Chain).toBeUndefined();
- });
-
- test("ai-gateway provider API key chain has skipIfFileContains for OIDC", () => {
- const filePath = "/project/lib/ai.ts";
- const fileContent = [
- `import { gateway } from 'ai';`,
- `// Using OIDC — no manual keys`,
- `const token = process.env.VERCEL_OIDC_TOKEN;`,
- `// Legacy reference: ANTHROPIC_API_KEY was here`,
- ].join("\n");
-
- const matched = matchFileToSkills(filePath, fileContent, data.compiledSkills, data.rulesMap, undefined, data.chainMap);
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const chainResult = runChainInjection(fileContent, matched, data.chainMap, null, ROOT, undefined, cleanEnv);
-
- // skipIfFileContains: 'VERCEL_OIDC|@ai-sdk/gateway|gateway(' should suppress
- const apiKeyChain = chainResult.injected.find(
- (i) => i.sourceSkill === "ai-gateway" && i.message?.includes("Provider-specific API key"),
- );
- expect(apiKeyChain).toBeUndefined();
- });
-});
-
-// ---------------------------------------------------------------------------
-// formatOutput with chainResult
-// ---------------------------------------------------------------------------
-
-describe("formatOutput with chain injection", () => {
- let formatOutput: typeof import("../hooks/src/posttooluse-validate.mts").formatOutput;
-
- beforeEach(async () => {
- const mod = await import("../hooks/posttooluse-validate.mjs");
- formatOutput = mod.formatOutput;
- });
-
- test("chain-only output (no violations) produces additionalContext", () => {
- const chainResult = {
- injected: [
- {
- sourceSkill: "ai-sdk",
- targetSkill: "ai-gateway",
- message: "Direct API key detected.",
- content: "# AI Gateway\n\nUse OIDC for auth.",
- },
- ],
- totalBytes: 40,
- };
-
- const result = formatOutput([], ["ai-sdk"], "/test/file.ts", undefined, "claude-code", undefined, chainResult);
- const parsed = JSON.parse(result);
- expect(parsed.hookSpecificOutput).toBeDefined();
- const ctx = parsed.hookSpecificOutput.additionalContext;
-
- // Chain markers present
-    expect(ctx).toContain("<posttooluse-chain:");
-    expect(ctx).toContain("</posttooluse-chain>");
- expect(ctx).toContain("**Skill context auto-loaded** (ai-gateway):");
- expect(ctx).toContain("Direct API key detected.");
- expect(ctx).toContain("# AI Gateway");
-
- // Metadata
- const meta = extractPostValidation(parsed.hookSpecificOutput);
- expect(meta).toBeDefined();
- expect(meta.chainedSkills).toEqual(["ai-gateway"]);
- expect(meta.errorCount).toBe(0);
- });
-
- test("violations + chains appear together in additionalContext", () => {
- const violations = [{
- skill: "ai-sdk",
- line: 3,
- message: "Use @ai-sdk/openai provider",
- severity: "error" as const,
- matchedText: "import OpenAI from 'openai'",
- }];
-
- const chainResult = {
- injected: [
- {
- sourceSkill: "ai-sdk",
- targetSkill: "ai-gateway",
- content: "# AI Gateway\n\nGateway docs here.",
- },
- ],
- totalBytes: 35,
- };
-
- const result = formatOutput(violations, ["ai-sdk"], "/test/file.ts", undefined, "claude-code", undefined, chainResult);
- const parsed = JSON.parse(result);
- const ctx = parsed.hookSpecificOutput.additionalContext;
-
- // Both validation and chain content present
- expect(ctx).toContain("VALIDATION");
- expect(ctx).toContain("[ERROR]");
-    expect(ctx).toContain("<posttooluse-chain:");
- expect(ctx).toContain("# AI Gateway");
-
- const meta = extractPostValidation(parsed.hookSpecificOutput);
- expect(meta.errorCount).toBe(1);
- expect(meta.chainedSkills).toEqual(["ai-gateway"]);
- });
-
- test("no violations and no chains returns empty JSON", () => {
- const result = formatOutput([], ["ai-sdk"], "/test/file.ts", undefined, "claude-code", undefined, { injected: [], totalBytes: 0 });
- expect(result).toBe("{}");
- });
-});
-
-// ---------------------------------------------------------------------------
-// Integration tests (full hook process spawn)
-// ---------------------------------------------------------------------------
-
-describe("posttooluse-chain integration", () => {
- let tmpDir: string;
- let testFile: string;
-
- beforeEach(() => {
- tmpDir = join(tmpdir(), `posttooluse-chain-${Date.now()}-${Math.random().toString(36).slice(2)}`);
- mkdirSync(tmpDir, { recursive: true });
- testFile = join(tmpDir, "app", "api", "chat", "route.ts");
- mkdirSync(join(tmpDir, "app", "api", "chat"), { recursive: true });
- });
-
- test("direct provider key in AI SDK file triggers ai-gateway chain injection", async () => {
- writeFileSync(testFile, [
- `import { generateText } from 'ai';`,
- ``,
- `const key = process.env.OPENAI_API_KEY;`,
- `const result = await generateText({`,
- ` model: 'openai/gpt-5.4',`,
- ` prompt: 'Hello!',`,
- `});`,
- ].join("\n"));
-
- const { code, parsed, ctx } = await runHook({
- tool_name: "Write",
- tool_input: { file_path: testFile },
- });
-
- expect(code).toBe(0);
- // Should have chain injection for ai-gateway
- if (ctx) {
- // The ai-sdk skill's chainTo should fire for OPENAI_API_KEY
- const meta = extractPostValidation(parsed.hookSpecificOutput);
- if (meta?.chainedSkills?.length > 0) {
- expect(meta.chainedSkills).toContain("ai-gateway");
- expect(ctx).toContain("posttooluse-chain:");
- }
- }
- });
-
- test("toDataStreamResponse triggers chain injection", async () => {
- writeFileSync(testFile, [
- `import { streamText } from 'ai';`,
- ``,
- `export async function POST(req: Request) {`,
- ` const result = streamText({`,
- ` model: 'openai/gpt-5.4',`,
- ` prompt: 'Hello!',`,
- ` });`,
- ` return result.toDataStreamResponse();`,
- `}`,
- ].join("\n"));
-
- const { code, ctx } = await runHook({
- tool_name: "Write",
- tool_input: { file_path: testFile },
- });
-
- expect(code).toBe(0);
- // ai-sdk skill should match via import, and chainTo for toDataStreamResponse should fire
- if (ctx) {
- const hasChain = ctx.includes("posttooluse-chain:");
- const hasValidation = ctx.includes("VALIDATION");
- // Should have at least validation or chain output
- expect(hasChain || hasValidation).toBe(true);
- }
- });
-
- test("clean file with no deprecated patterns produces no chain injection", async () => {
- writeFileSync(testFile, [
- `import { generateText } from 'ai';`,
- `import { useChat } from '@ai-sdk/react';`,
- ``,
- `const result = await generateText({`,
- ` model: 'openai/gpt-5.4',`,
- ` prompt: 'Hello!',`,
- `});`,
- ].join("\n"));
-
- const { code, ctx } = await runHook({
- tool_name: "Write",
- tool_input: { file_path: testFile },
- });
-
- expect(code).toBe(0);
- // No chain markers expected for clean code
- expect(ctx).not.toContain("posttooluse-chain:");
- });
-});
-
-// ---------------------------------------------------------------------------
-// PostToolUse Bash chain: package install detection
-// ---------------------------------------------------------------------------
-
-describe("posttooluse-bash-chain: parseInstallCommand", () => {
- let parseInstallCommand: typeof import("../hooks/src/posttooluse-bash-chain.mts").parseInstallCommand;
-
- beforeEach(async () => {
- const mod = await import("../hooks/posttooluse-bash-chain.mjs");
- parseInstallCommand = mod.parseInstallCommand;
- });
-
- test("parses npm install with single package", () => {
- expect(parseInstallCommand("npm install express")).toEqual(["express"]);
- });
-
- test("parses npm i shorthand", () => {
- expect(parseInstallCommand("npm i express")).toEqual(["express"]);
- });
-
- test("parses yarn add with multiple packages", () => {
- expect(parseInstallCommand("yarn add express mongoose")).toEqual(["express", "mongoose"]);
- });
-
- test("parses pnpm add with scoped package", () => {
- expect(parseInstallCommand("pnpm add @vercel/postgres")).toEqual(["@vercel/postgres"]);
- });
-
- test("parses bun add", () => {
- expect(parseInstallCommand("bun add openai")).toEqual(["openai"]);
- });
-
- test("strips version specifiers from unscoped packages", () => {
- expect(parseInstallCommand("npm install express@latest")).toEqual(["express"]);
- expect(parseInstallCommand("npm install express@^4.0.0")).toEqual(["express"]);
- });
-
- test("strips version specifiers from scoped packages", () => {
- expect(parseInstallCommand("npm install @vercel/postgres@0.10.0")).toEqual(["@vercel/postgres"]);
- });
-
- test("filters out flags like --save-dev and -D", () => {
- expect(parseInstallCommand("npm install --save-dev express")).toEqual(["express"]);
- expect(parseInstallCommand("npm install -D express")).toEqual(["express"]);
- });
-
- test("returns empty array for non-install commands", () => {
- expect(parseInstallCommand("npm run dev")).toEqual([]);
- expect(parseInstallCommand("git commit -m 'test'")).toEqual([]);
- expect(parseInstallCommand("ls -la")).toEqual([]);
- });
-
- test("returns empty array for empty or null input", () => {
- expect(parseInstallCommand("")).toEqual([]);
- expect(parseInstallCommand(null as any)).toEqual([]);
- });
-
- test("handles npm install with no packages (bare install)", () => {
- // "npm install" with no packages — the regex requires at least one token after install
- // but the trailing space matches empty, which gets filtered
- const result = parseInstallCommand("npm install");
- // Should not crash; result depends on regex behavior
- expect(Array.isArray(result)).toBe(true);
- });
-
- test("filters out path arguments", () => {
- expect(parseInstallCommand("npm install ./local-pkg ../other-pkg")).toEqual([]);
- expect(parseInstallCommand("npm install /absolute/path")).toEqual([]);
- });
-});
-
-describe("posttooluse-bash-chain: runBashChainInjection", () => {
- let runBashChainInjection: typeof import("../hooks/src/posttooluse-bash-chain.mts").runBashChainInjection;
- let PACKAGE_SKILL_MAP: typeof import("../hooks/src/posttooluse-bash-chain.mts").PACKAGE_SKILL_MAP;
-
- beforeEach(async () => {
- const mod = await import("../hooks/posttooluse-bash-chain.mjs");
- runBashChainInjection = mod.runBashChainInjection;
- PACKAGE_SKILL_MAP = mod.PACKAGE_SKILL_MAP;
- });
-
- test("express maps to vercel-functions", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(
- ["express"],
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("vercel-functions");
- expect(result.injected[0].packageName).toBe("express");
- expect(result.injected[0].content.length).toBeGreaterThan(0);
- expect(result.totalBytes).toBeGreaterThan(0);
- });
-
- test("bullmq maps to vercel-queues", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(
- ["bullmq"],
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("vercel-queues");
- });
-
- test("mongoose maps to vercel-storage", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(
- ["mongoose"],
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("vercel-storage");
- });
-
- test("@vercel/postgres maps to vercel-storage", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(
- ["@vercel/postgres"],
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("vercel-storage");
- expect(result.injected[0].message).toContain("sunset");
- });
-
- test("openai maps to ai-gateway", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(
- ["openai"],
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("ai-gateway");
- });
-
- test("already-seen skill is NOT re-injected", () => {
- const envWithSeen: any = { VERCEL_PLUGIN_SEEN_SKILLS: "vercel-functions" };
- const result = runBashChainInjection(
- ["express"],
- null,
- ROOT,
- undefined,
- envWithSeen,
- );
-
- expect(result.injected.length).toBe(0);
- });
-
- test("unknown package produces no injection", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(
- ["lodash"],
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(0);
- expect(result.totalBytes).toBe(0);
- });
-
- test("multiple packages mapping to same skill only inject once", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(
- ["express", "fastify"], // both map to vercel-functions
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("vercel-functions");
- });
-
- test("chain cap is respected", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "", VERCEL_PLUGIN_CHAIN_CAP: "1" };
- const result = runBashChainInjection(
- ["express", "openai", "mongoose"], // 3 different skills
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.injected.length).toBe(1);
- });
-
- test("byte budget is respected", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "", VERCEL_PLUGIN_CHAIN_CAP: "10" };
- const result = runBashChainInjection(
- ["express", "openai", "mongoose", "bullmq", "swr"],
- null,
- ROOT,
- undefined,
- cleanEnv,
- );
-
- expect(result.totalBytes).toBeLessThanOrEqual(18_000);
- });
-
- test("PACKAGE_SKILL_MAP has required entries", () => {
- expect(PACKAGE_SKILL_MAP["express"]?.skill).toBe("vercel-functions");
- expect(PACKAGE_SKILL_MAP["bullmq"]?.skill).toBe("vercel-queues");
- expect(PACKAGE_SKILL_MAP["mongoose"]?.skill).toBe("vercel-storage");
- expect(PACKAGE_SKILL_MAP["@vercel/postgres"]?.skill).toBe("vercel-storage");
- expect(PACKAGE_SKILL_MAP["openai"]?.skill).toBe("ai-gateway");
- // New entries
- expect(PACKAGE_SKILL_MAP["prisma"]?.skill).toBe("vercel-storage");
- expect(PACKAGE_SKILL_MAP["@libsql/client"]?.skill).toBe("vercel-storage");
- expect(PACKAGE_SKILL_MAP["stripe"]?.skill).toBe("payments");
- expect(PACKAGE_SKILL_MAP["langchain"]?.skill).toBe("ai-sdk");
- expect(PACKAGE_SKILL_MAP["@clerk/nextjs"]?.skill).toBe("auth");
- expect(PACKAGE_SKILL_MAP["@sanity/client"]?.skill).toBe("cms");
- expect(PACKAGE_SKILL_MAP["contentful"]?.skill).toBe("cms");
- expect(PACKAGE_SKILL_MAP["resend"]?.skill).toBe("email");
- // Remaining entries
- expect(PACKAGE_SKILL_MAP["fastify"]?.skill).toBe("vercel-functions");
- expect(PACKAGE_SKILL_MAP["koa"]?.skill).toBe("vercel-functions");
- expect(PACKAGE_SKILL_MAP["bull"]?.skill).toBe("vercel-queues");
- expect(PACKAGE_SKILL_MAP["@vercel/kv"]?.skill).toBe("vercel-storage");
- expect(PACKAGE_SKILL_MAP["@anthropic-ai/sdk"]?.skill).toBe("ai-gateway");
- expect(PACKAGE_SKILL_MAP["@google/generative-ai"]?.skill).toBe("ai-gateway");
- expect(PACKAGE_SKILL_MAP["@langchain/core"]?.skill).toBe("ai-sdk");
- expect(PACKAGE_SKILL_MAP["workflow"]?.skill).toBe("workflow");
- expect(PACKAGE_SKILL_MAP["ai"]?.skill).toBe("ai-sdk");
- expect(PACKAGE_SKILL_MAP["@ai-sdk/react"]?.skill).toBe("ai-sdk");
- expect(PACKAGE_SKILL_MAP["@vercel/flags"]?.skill).toBe("vercel-flags");
- expect(PACKAGE_SKILL_MAP["swr"]?.skill).toBe("swr");
- expect(PACKAGE_SKILL_MAP["node-cron"]?.skill).toBe("cron-jobs");
- expect(PACKAGE_SKILL_MAP["cron"]?.skill).toBe("cron-jobs");
- // Iteration 2 entries
- expect(PACKAGE_SKILL_MAP["next-auth"]?.skill).toBe("auth");
- expect(PACKAGE_SKILL_MAP["@slack/bolt"]?.skill).toBe("chat-sdk");
- expect(PACKAGE_SKILL_MAP["@slack/web-api"]?.skill).toBe("chat-sdk");
- expect(PACKAGE_SKILL_MAP["discord.js"]?.skill).toBe("chat-sdk");
- expect(PACKAGE_SKILL_MAP["telegraf"]?.skill).toBe("chat-sdk");
- expect(PACKAGE_SKILL_MAP["grammy"]?.skill).toBe("chat-sdk");
- expect(PACKAGE_SKILL_MAP["helmet"]?.skill).toBe("vercel-firewall");
- expect(PACKAGE_SKILL_MAP["cors"]?.skill).toBe("routing-middleware");
- expect(PACKAGE_SKILL_MAP["dotenv"]?.skill).toBe("env-vars");
- });
-
- // -------------------------------------------------------------------
- // Individual injection tests for every PACKAGE_SKILL_MAP entry
- // -------------------------------------------------------------------
-
- test("prisma maps to vercel-storage", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["prisma"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("vercel-storage");
- expect(result.injected[0].message).toContain("Neon");
- });
-
- test("@libsql/client maps to vercel-storage", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["@libsql/client"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("vercel-storage");
- });
-
- test("stripe maps to payments", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["stripe"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("payments");
- expect(result.injected[0].message).toContain("Stripe");
- });
-
- test("langchain maps to ai-sdk", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["langchain"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("ai-sdk");
- expect(result.injected[0].message).toContain("LangChain");
- });
-
- test("@langchain/core maps to ai-sdk", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["@langchain/core"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("ai-sdk");
- expect(result.injected[0].message).toContain("LangChain");
- });
-
- test("@clerk/nextjs maps to auth", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["@clerk/nextjs"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("auth");
- expect(result.injected[0].message).toContain("Clerk");
- });
-
- test("@anthropic-ai/sdk maps to ai-gateway", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["@anthropic-ai/sdk"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("ai-gateway");
- expect(result.injected[0].message).toContain("Anthropic");
- });
-
- test("@google/generative-ai maps to ai-gateway", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["@google/generative-ai"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("ai-gateway");
- expect(result.injected[0].message).toContain("Google");
- });
-
- test("@vercel/kv maps to vercel-storage with sunset message", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["@vercel/kv"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("vercel-storage");
- expect(result.injected[0].message).toContain("sunset");
- });
-
- test("@sanity/client maps to cms", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["@sanity/client"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("cms");
- });
-
- test("contentful maps to cms", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["contentful"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("cms");
- });
-
- test("resend maps to email", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["resend"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("email");
- });
-
- test("fastify maps to vercel-functions", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["fastify"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("vercel-functions");
- });
-
- test("koa maps to vercel-functions", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["koa"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("vercel-functions");
- });
-
- test("bull maps to vercel-queues", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["bull"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("vercel-queues");
- });
-
- test("workflow maps to workflow", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["workflow"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("workflow");
- });
-
- test("ai maps to ai-sdk", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["ai"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("ai-sdk");
- });
-
- test("@ai-sdk/react maps to ai-sdk", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["@ai-sdk/react"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("ai-sdk");
- });
-
- test("@vercel/flags maps to vercel-flags", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["@vercel/flags"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("vercel-flags");
- });
-
- test("swr maps to swr", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["swr"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("swr");
- });
-
- test("node-cron maps to cron-jobs", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["node-cron"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("cron-jobs");
- });
-
- test("cron maps to cron-jobs", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["cron"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("cron-jobs");
- });
-
- // -------------------------------------------------------------------
- // Iteration 2: new package entries
- // -------------------------------------------------------------------
-
- test("next-auth maps to auth", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["next-auth"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("auth");
- expect(result.injected[0].message).toContain("next-auth");
- });
-
- test("@slack/bolt maps to chat-sdk", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["@slack/bolt"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("chat-sdk");
- expect(result.injected[0].message).toContain("Chat SDK");
- });
-
- test("@slack/web-api maps to chat-sdk", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["@slack/web-api"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("chat-sdk");
- });
-
- test("discord.js maps to chat-sdk", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["discord.js"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("chat-sdk");
- expect(result.injected[0].message).toContain("discord.js");
- });
-
- test("telegraf maps to chat-sdk", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["telegraf"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("chat-sdk");
- expect(result.injected[0].message).toContain("Telegraf");
- });
-
- test("grammy maps to chat-sdk", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["grammy"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("chat-sdk");
- expect(result.injected[0].message).toContain("Grammy");
- });
-
- test("helmet maps to vercel-firewall", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["helmet"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("vercel-firewall");
- expect(result.injected[0].message).toContain("Firewall");
- });
-
- test("cors maps to routing-middleware", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["cors"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("routing-middleware");
- expect(result.injected[0].message).toContain("Routing Middleware");
- });
-
- test("dotenv maps to env-vars", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "" };
- const result = runBashChainInjection(["dotenv"], null, ROOT, undefined, cleanEnv);
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("env-vars");
- expect(result.injected[0].message).toContain("vercel env");
- });
-
- // -------------------------------------------------------------------
- // Regression: cross-package dedup and budget compliance
- // -------------------------------------------------------------------
-
- test("prisma + stripe + langchain install targets 3 distinct skills", () => {
- const cleanEnv: any = { VERCEL_PLUGIN_SEEN_SKILLS: "", VERCEL_PLUGIN_CHAIN_CAP: "10" };
- const result = runBashChainInjection(
- ["prisma", "stripe", "langchain"],
- null, ROOT, undefined, cleanEnv,
- );
- expect(result.injected.length).toBe(3);
- expect(result.injected[0].skill).toBe("vercel-storage");
- expect(result.injected[1].skill).toBe("payments");
- expect(result.injected[2].skill).toBe("ai-sdk");
- expect(result.totalBytes).toBeLessThanOrEqual(18_000);
- });
-
- test("@clerk/nextjs + langchain + stripe dedup with seen skills", () => {
- const envWithSeen: any = { VERCEL_PLUGIN_SEEN_SKILLS: "auth,ai-sdk" };
- const result = runBashChainInjection(
- ["@clerk/nextjs", "langchain", "stripe"],
- null, ROOT, undefined, envWithSeen,
- );
- // auth and ai-sdk already seen, only stripe/payments should inject
- expect(result.injected.length).toBe(1);
- expect(result.injected[0].skill).toBe("payments");
- });
-});
-
-describe("posttooluse-bash-chain: formatBashChainOutput", () => {
- let formatBashChainOutput: typeof import("../hooks/src/posttooluse-bash-chain.mts").formatBashChainOutput;
-
- beforeEach(async () => {
- const mod = await import("../hooks/posttooluse-bash-chain.mjs");
- formatBashChainOutput = mod.formatBashChainOutput;
- });
-
- test("empty injections return empty JSON", () => {
- expect(formatBashChainOutput({ injected: [], totalBytes: 0 })).toBe("{}");
- });
-
- test("non-empty injections produce hookSpecificOutput with additionalContext", () => {
- const result = formatBashChainOutput({
- injected: [{
- packageName: "express",
- skill: "vercel-functions",
- message: "Express.js detected",
- content: "# Vercel Functions\nSome content here.",
- }],
- totalBytes: 100,
- });
-
- const parsed = JSON.parse(result);
- expect(parsed.hookSpecificOutput).toBeDefined();
- expect(parsed.hookSpecificOutput.additionalContext).toContain("posttooluse-bash-chain:");
- expect(parsed.hookSpecificOutput.additionalContext).toContain("vercel-functions");
- expect(parsed.hookSpecificOutput.additionalContext).toContain("Express.js detected");
- expect(parsed.hookSpecificOutput.additionalContext).toContain("postBashChain:");
- });
-});
-
-describe("posttooluse-bash-chain: parseBashInput", () => {
- let parseBashInput: typeof import("../hooks/src/posttooluse-bash-chain.mts").parseBashInput;
-
- beforeEach(async () => {
- const mod = await import("../hooks/posttooluse-bash-chain.mjs");
- parseBashInput = mod.parseBashInput;
- });
-
- test("parses valid Bash tool input", () => {
- const raw = JSON.stringify({
- tool_name: "Bash",
- tool_input: { command: "npm install express" },
- session_id: "test-session",
- });
- const result = parseBashInput(raw);
- expect(result).not.toBeNull();
- expect(result!.command).toBe("npm install express");
- expect(result!.sessionId).toBe("test-session");
- });
-
- test("returns null for non-Bash tool", () => {
- const raw = JSON.stringify({
- tool_name: "Write",
- tool_input: { file_path: "/test.ts" },
- });
- expect(parseBashInput(raw)).toBeNull();
- });
-
- test("returns null for empty input", () => {
- expect(parseBashInput("")).toBeNull();
- });
-
- test("returns null for invalid JSON", () => {
- expect(parseBashInput("not json")).toBeNull();
- });
-
- test("returns null for Bash tool with no command", () => {
- const raw = JSON.stringify({
- tool_name: "Bash",
- tool_input: {},
- });
- expect(parseBashInput(raw)).toBeNull();
- });
-});
diff --git a/tests/posttooluse-validate.test.ts b/tests/posttooluse-validate.test.ts
deleted file mode 100644
index 13ffe80..0000000
--- a/tests/posttooluse-validate.test.ts
+++ /dev/null
@@ -1,635 +0,0 @@
-import { describe, test, expect, beforeEach } from "bun:test";
-import { existsSync, mkdtempSync, writeFileSync, mkdirSync, readFileSync, rmSync } from "node:fs";
-import { join, resolve } from "node:path";
-import { tmpdir } from "node:os";
-import { readSessionFile } from "../hooks/src/hook-env.mts";
-
-const ROOT = resolve(import.meta.dirname, "..");
-const HOOK_SCRIPT = join(ROOT, "hooks", "posttooluse-validate.mjs");
-
-// Unique session ID per test run
-let testSession: string;
-
-beforeEach(() => {
- testSession = `test-${Date.now()}-${Math.random().toString(36).slice(2)}`;
-});
-
-/**
- * Extract postValidation metadata from additionalContext.
- */
-function extractPostValidation(hookSpecificOutput: any): any {
- const ctx = hookSpecificOutput?.additionalContext || "";
- const match = ctx.match(//);
- if (!match) return undefined;
- try { return JSON.parse(match[1]); } catch { return undefined; }
-}
-
-async function runHook(
- input: object,
- extraEnv?: Record,
-): Promise<{ code: number; stdout: string; stderr: string }> {
- const payload = JSON.stringify({ ...input, session_id: testSession });
- const proc = Bun.spawn(["node", HOOK_SCRIPT], {
- stdin: "pipe",
- stdout: "pipe",
- stderr: "pipe",
- env: {
- ...process.env,
- VERCEL_PLUGIN_VALIDATED_FILES: "",
- ...extraEnv,
- },
- });
- proc.stdin.write(payload);
- proc.stdin.end();
- const code = await proc.exited;
- const stdout = await new Response(proc.stdout).text();
- const stderr = await new Response(proc.stderr).text();
- return { code, stdout, stderr };
-}
-
-describe("posttooluse-validate.mjs", () => {
- test("hook script exists", () => {
- expect(existsSync(HOOK_SCRIPT)).toBe(true);
- });
-
- test("outputs empty JSON for unsupported tool (Read)", async () => {
- const { code, stdout } = await runHook({
- tool_name: "Read",
- tool_input: { file_path: "/some/file.ts" },
- });
- expect(code).toBe(0);
- expect(JSON.parse(stdout)).toEqual({});
- });
-
- test("outputs empty JSON for unsupported tool (Bash)", async () => {
- const { code, stdout } = await runHook({
- tool_name: "Bash",
- tool_input: { command: "npm run dev" },
- });
- expect(code).toBe(0);
- expect(JSON.parse(stdout)).toEqual({});
- });
-
- test("outputs empty JSON for empty stdin", async () => {
- const proc = Bun.spawn(["node", HOOK_SCRIPT], {
- stdin: "pipe",
- stdout: "pipe",
- stderr: "pipe",
- });
- proc.stdin.end();
- const code = await proc.exited;
- const stdout = await new Response(proc.stdout).text();
- expect(code).toBe(0);
- expect(JSON.parse(stdout)).toEqual({});
- });
-
- test("outputs empty JSON for missing file_path", async () => {
- const { code, stdout } = await runHook({
- tool_name: "Write",
- tool_input: {},
- });
- expect(code).toBe(0);
- expect(JSON.parse(stdout)).toEqual({});
- });
-
- test("outputs empty JSON for non-existent file", async () => {
- const { code, stdout } = await runHook({
- tool_name: "Write",
- tool_input: { file_path: "/nonexistent/path/file.ts" },
- });
- expect(code).toBe(0);
- expect(JSON.parse(stdout)).toEqual({});
- });
-
- describe("validation rules", () => {
- let tmpDir: string;
- let testFile: string;
-
- beforeEach(() => {
- tmpDir = join(tmpdir(), `posttooluse-validate-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
- mkdirSync(tmpDir, { recursive: true });
- testFile = join(tmpDir, "app", "api", "chat", "route.ts");
- mkdirSync(join(tmpDir, "app", "api", "chat"), { recursive: true });
- });
-
- test("detects direct openai import in ai-sdk file", async () => {
- writeFileSync(testFile, `import OpenAI from 'openai';\n\nconst client = new OpenAI();\n`);
- const { code, stdout } = await runHook({
- tool_name: "Write",
- tool_input: { file_path: testFile },
- });
- expect(code).toBe(0);
- const result = JSON.parse(stdout);
- expect(result.hookSpecificOutput).toBeDefined();
- const ctx = result.hookSpecificOutput.additionalContext;
- expect(ctx).toContain("@ai-sdk/openai");
- expect(ctx).toContain("VALIDATION");
- const meta = extractPostValidation(result.hookSpecificOutput);
- expect(meta).toBeDefined();
- expect(meta.errorCount).toBeGreaterThan(0);
- expect(meta.filePath).toBe(testFile);
- });
-
- test("detects raw Anthropic client usage", async () => {
- writeFileSync(testFile, `import Anthropic from 'anthropic';\nconst client = new Anthropic();\n`);
- const { code, stdout } = await runHook({
- tool_name: "Edit",
- tool_input: { file_path: testFile },
- });
- expect(code).toBe(0);
- const result = JSON.parse(stdout);
- expect(result.hookSpecificOutput).toBeDefined();
- const ctx = result.hookSpecificOutput.additionalContext;
- expect(ctx).toContain("@ai-sdk/anthropic");
- expect(ctx).toContain("VALIDATION");
- const meta = extractPostValidation(result.hookSpecificOutput);
- expect(meta).toBeDefined();
- expect(meta.errorCount).toBeGreaterThan(0);
- });
-
- test("detects Experimental_Agent usage", async () => {
- writeFileSync(testFile, `import { Experimental_Agent } from 'ai';\nconst agent = new Experimental_Agent({});\n`);
- const { code, stdout } = await runHook({
- tool_name: "Write",
- tool_input: { file_path: testFile },
- });
- expect(code).toBe(0);
- const result = JSON.parse(stdout);
- expect(result.hookSpecificOutput).toBeDefined();
- const ctx = result.hookSpecificOutput.additionalContext;
- expect(ctx).toContain("Experimental_Agent");
- expect(ctx).toContain("ToolLoopAgent");
- const meta = extractPostValidation(result.hookSpecificOutput);
- expect(meta).toBeDefined();
- expect(meta.errorCount).toBeGreaterThan(0);
- });
-
- test("no violations for correct ai-sdk usage", async () => {
- writeFileSync(testFile, [
- `import { generateText, gateway } from 'ai';`,
- `import { openai } from '@ai-sdk/openai';`,
- ``,
- `const result = await generateText({`,
- ` model: gateway('openai/gpt-5.4'),`,
- ` prompt: 'Hello!',`,
- `});`,
- ].join("\n"));
- const { code, stdout } = await runHook({
- tool_name: "Write",
- tool_input: { file_path: testFile },
- });
- expect(code).toBe(0);
- const result = JSON.parse(stdout);
- // Clean file: either no hookSpecificOutput, or zero errors
- if (result.hookSpecificOutput) {
- const meta = extractPostValidation(result.hookSpecificOutput);
- expect(meta?.errorCount || 0).toBe(0);
- }
- });
-
- test("detects gateway from 'ai' with hyphenated model slug", async () => {
- writeFileSync(testFile, [
- `import { generateText, gateway } from 'ai';`,
- ``,
- `const result = await generateText({`,
- ` model: gateway('anthropic/claude-sonnet-4-6'),`,
- ` prompt: 'Hello!',`,
- `});`,
- ].join("\n"));
- const { code, stdout } = await runHook({
- tool_name: "Write",
- tool_input: { file_path: testFile },
- });
- expect(code).toBe(0);
- const result = JSON.parse(stdout);
- expect(result.hookSpecificOutput).toBeDefined();
- const ctx = result.hookSpecificOutput.additionalContext;
- expect(ctx).toContain("VALIDATION");
- expect(ctx).toContain("dots not hyphens");
- const meta = extractPostValidation(result.hookSpecificOutput);
- expect(meta).toBeDefined();
- expect(meta.errorCount).toBeGreaterThan(0);
- expect(meta.matchedSkills).toContain("ai-gateway");
- });
-
- test("no output for file that doesn't match any skill", async () => {
- const randomFile = join(tmpDir, "random-file.txt");
- writeFileSync(randomFile, "hello world\nimport OpenAI from 'openai';\n");
- const { code, stdout } = await runHook({
- tool_name: "Write",
- tool_input: { file_path: randomFile },
- });
- expect(code).toBe(0);
- // The file content has an openai import but the path doesn't match any skill
- // The import WOULD match ai-sdk skill, so it could still fire
- // That's fine - import matching is valid
- });
- });
-
- describe("dedup via VERCEL_PLUGIN_VALIDATED_FILES", () => {
- let tmpDir: string;
- let testFile: string;
-
- beforeEach(() => {
- tmpDir = join(tmpdir(), `posttooluse-validate-dedup-${Date.now()}-${Math.random().toString(36).slice(2)}`);
- mkdirSync(tmpDir, { recursive: true });
- testFile = join(tmpDir, "app", "api", "chat", "route.ts");
- mkdirSync(join(tmpDir, "app", "api", "chat"), { recursive: true });
- writeFileSync(testFile, `import OpenAI from 'openai';\n`);
- });
-
- test("second run with same content is still valid (dedup is per-process env)", async () => {
- // First run
- const first = await runHook({
- tool_name: "Write",
- tool_input: { file_path: testFile },
- });
- expect(first.code).toBe(0);
-
- // Second run with same file - env is fresh per process, so it should still validate
- const second = await runHook({
- tool_name: "Write",
- tool_input: { file_path: testFile },
- });
- expect(second.code).toBe(0);
- // Both should produce valid JSON
- JSON.parse(first.stdout);
- JSON.parse(second.stdout);
- });
-
- test("writes validated state to the session file without appending CLAUDE_ENV_FILE exports", async () => {
- const envDir = mkdtempSync(join(tmpdir(), "posttooluse-validate-env-"));
- const envFile = join(envDir, "claude.env");
- writeFileSync(envFile, "export SEEDED=1\n");
-
- try {
- const result = await runHook(
- {
- tool_name: "Write",
- tool_input: { file_path: testFile },
- },
- {
- CLAUDE_ENV_FILE: envFile,
- },
- );
-
- expect(result.code).toBe(0);
- expect(readFileSync(envFile, "utf-8")).toBe("export SEEDED=1\n");
- expect(readSessionFile(testSession, "validated-files")).toContain(`${testFile}:`);
- } finally {
- rmSync(envDir, { recursive: true, force: true });
- }
- });
- });
-
- describe("parseInput unit tests (imported)", () => {
- // Import the module for unit testing
- let parseInput: typeof import("../hooks/src/posttooluse-validate.mts").parseInput;
-
- beforeEach(async () => {
- const mod = await import("../hooks/posttooluse-validate.mjs");
- parseInput = mod.parseInput;
- });
-
- test("returns null for empty string", () => {
- expect(parseInput("")).toBeNull();
- });
-
- test("returns null for invalid JSON", () => {
- expect(parseInput("not json")).toBeNull();
- });
-
- test("returns null for unsupported tool", () => {
- expect(parseInput(JSON.stringify({ tool_name: "Read", tool_input: { file_path: "/test" } }))).toBeNull();
- });
-
- test("returns null for missing file_path", () => {
- expect(parseInput(JSON.stringify({ tool_name: "Write", tool_input: {} }))).toBeNull();
- });
-
- test("parses Write tool correctly", () => {
- const result = parseInput(JSON.stringify({
- tool_name: "Write",
- tool_input: { file_path: "/test/file.ts" },
- cwd: "/workspace",
- }));
- expect(result).not.toBeNull();
- expect(result!.toolName).toBe("Write");
- expect(result!.filePath).toBe("/test/file.ts");
- expect(result!.cwd).toBe("/workspace");
- });
-
- test("parses Edit tool correctly", () => {
- const result = parseInput(JSON.stringify({
- tool_name: "Edit",
- tool_input: { file_path: "/test/file.ts" },
- }));
- expect(result).not.toBeNull();
- expect(result!.toolName).toBe("Edit");
- });
- });
-
- describe("runValidation unit tests (imported)", () => {
- let runValidation: typeof import("../hooks/src/posttooluse-validate.mts").runValidation;
-
- beforeEach(async () => {
- const mod = await import("../hooks/posttooluse-validate.mjs");
- runValidation = mod.runValidation;
- });
-
- test("finds violations on matching lines", () => {
- const rules = new Map([
- ["test-skill", [
- { pattern: "import.*from ['\"]openai['\"]", message: "Use @ai-sdk/openai", severity: "error" as const },
- ]],
- ]);
- const content = `import OpenAI from 'openai';\nconst x = 1;\n`;
- const violations = runValidation(content, ["test-skill"], rules);
- expect(violations.length).toBe(1);
- expect(violations[0].line).toBe(1);
- expect(violations[0].severity).toBe("error");
- expect(violations[0].skill).toBe("test-skill");
- });
-
- test("returns empty for non-matching content", () => {
- const rules = new Map([
- ["test-skill", [
- { pattern: "import.*from ['\"]openai['\"]", message: "Use @ai-sdk/openai", severity: "error" as const },
- ]],
- ]);
- const content = `import { openai } from '@ai-sdk/openai';\n`;
- const violations = runValidation(content, ["test-skill"], rules);
- expect(violations.length).toBe(0);
- });
-
- test("handles multiple skills with overlapping rules", () => {
- const rules = new Map([
- ["skill-a", [
- { pattern: "new OpenAI\\(", message: "Don't use raw OpenAI", severity: "error" as const },
- ]],
- ["skill-b", [
- { pattern: "new OpenAI\\(", message: "Use AI SDK instead", severity: "error" as const },
- ]],
- ]);
- const content = `const client = new OpenAI();\n`;
- const violations = runValidation(content, ["skill-a", "skill-b"], rules);
- expect(violations.length).toBe(2);
- expect(new Set(violations.map(v => v.skill))).toEqual(new Set(["skill-a", "skill-b"]));
- });
-
- test("skips invalid regex patterns gracefully", () => {
- const rules = new Map([
- ["test-skill", [
- { pattern: "[invalid(regex", message: "test", severity: "error" as const },
- { pattern: "validPattern", message: "found it", severity: "error" as const },
- ]],
- ]);
- const content = `validPattern here\n`;
- const violations = runValidation(content, ["test-skill"], rules);
- expect(violations.length).toBe(1);
- expect(violations[0].message).toBe("found it");
- });
- });
-
- describe("dedup helpers unit tests (imported)", () => {
- let parseValidatedFiles: typeof import("../hooks/src/posttooluse-validate.mts").parseValidatedFiles;
- let appendValidatedFile: typeof import("../hooks/src/posttooluse-validate.mts").appendValidatedFile;
- let contentHash: typeof import("../hooks/src/posttooluse-validate.mts").contentHash;
-
- beforeEach(async () => {
- const mod = await import("../hooks/posttooluse-validate.mjs");
- parseValidatedFiles = mod.parseValidatedFiles;
- appendValidatedFile = mod.appendValidatedFile;
- contentHash = mod.contentHash;
- });
-
- test("parseValidatedFiles handles empty string", () => {
- expect(parseValidatedFiles("")).toEqual(new Set());
- });
-
- test("parseValidatedFiles handles undefined", () => {
- expect(parseValidatedFiles(undefined)).toEqual(new Set());
- });
-
- test("parseValidatedFiles parses comma-delimited entries", () => {
- const result = parseValidatedFiles("file1.ts:abc123,file2.ts:def456");
- expect(result.has("file1.ts:abc123")).toBe(true);
- expect(result.has("file2.ts:def456")).toBe(true);
- expect(result.size).toBe(2);
- });
-
- test("appendValidatedFile appends to empty", () => {
- expect(appendValidatedFile("", "file.ts:abc")).toBe("file.ts:abc");
- });
-
- test("appendValidatedFile appends to existing", () => {
- expect(appendValidatedFile("a:1", "b:2")).toBe("a:1,b:2");
- });
-
- test("contentHash produces consistent short hash", () => {
- const h1 = contentHash("hello world");
- const h2 = contentHash("hello world");
- expect(h1).toBe(h2);
- expect(h1.length).toBe(12);
- // Different content → different hash
- expect(contentHash("different")).not.toBe(h1);
- });
- });
-
- describe("formatOutput unit tests (imported)", () => {
- let formatOutput: typeof import("../hooks/src/posttooluse-validate.mts").formatOutput;
-
- beforeEach(async () => {
- const mod = await import("../hooks/posttooluse-validate.mjs");
- formatOutput = mod.formatOutput;
- });
-
- test("returns empty JSON for no violations", () => {
- const result = formatOutput([], ["ai-sdk"], "/test/file.ts");
- expect(result).toBe("{}");
- });
-
- test("returns suggestion output for warn-only violations at default log level", () => {
- const violations = [{
- skill: "test",
- line: 1,
- message: "just a warning",
- severity: "warn" as const,
- matchedText: "something",
- }];
- const result = formatOutput(violations, ["test"], "/test/file.ts");
- const parsed = JSON.parse(result);
- expect(parsed.hookSpecificOutput).toBeDefined();
- expect(parsed.hookSpecificOutput.hookEventName).toBe("PostToolUse");
- const ctx = parsed.hookSpecificOutput.additionalContext;
- expect(ctx).toContain("[SUGGESTION]");
- expect(ctx).toContain("just a warning");
- expect(ctx).toContain("Consider applying these suggestions");
- expect(ctx).not.toContain("[ERROR]");
- expect(ctx).not.toContain("Please fix these issues");
- const meta = extractPostValidation(parsed.hookSpecificOutput);
- expect(meta.errorCount).toBe(0);
- expect(meta.warnCount).toBe(1);
- });
-
- test("returns recommended output for recommended-only violations", () => {
- const violations = [{
- skill: "ai-sdk",
- line: 2,
- message: "outdated model",
- severity: "recommended" as const,
- matchedText: "gpt-4o",
- }];
- const result = formatOutput(violations, ["ai-sdk"], "/test/file.ts");
- const parsed = JSON.parse(result);
- expect(parsed.hookSpecificOutput).toBeDefined();
- expect(parsed.hookSpecificOutput.hookEventName).toBe("PostToolUse");
- const ctx = parsed.hookSpecificOutput.additionalContext;
- expect(ctx).toContain("[RECOMMENDED]");
- expect(ctx).toContain("outdated model");
- expect(ctx).toContain("Apply these recommendations before continuing");
- expect(ctx).not.toContain("[ERROR]");
- expect(ctx).not.toContain("[SUGGESTION]");
- expect(ctx).not.toContain("Consider applying");
- const meta = extractPostValidation(parsed.hookSpecificOutput);
- expect(meta.errorCount).toBe(0);
- expect(meta.recommendedCount).toBe(1);
- expect(meta.warnCount).toBe(0);
- });
-
- test("errors take precedence over recommended in call-to-action", () => {
- const violations = [
- { skill: "ai-sdk", line: 1, message: "error msg", severity: "error" as const, matchedText: "x" },
- { skill: "ai-sdk", line: 2, message: "rec msg", severity: "recommended" as const, matchedText: "y" },
- ];
- const result = formatOutput(violations, ["ai-sdk"], "/test/file.ts");
- const parsed = JSON.parse(result);
- const ctx = parsed.hookSpecificOutput.additionalContext;
- expect(ctx).toContain("[ERROR]");
- expect(ctx).toContain("[RECOMMENDED]");
- expect(ctx).toContain("Please fix these issues");
- expect(ctx).not.toContain("Apply these recommendations");
- });
-
- test("includes error violations in output", () => {
- const violations = [{
- skill: "ai-sdk",
- line: 3,
- message: "Use @ai-sdk/openai provider",
- severity: "error" as const,
- matchedText: "import OpenAI from 'openai'",
- }];
- const result = formatOutput(violations, ["ai-sdk"], "/test/file.ts");
- const parsed = JSON.parse(result);
- expect(parsed.hookSpecificOutput).toBeDefined();
- expect(parsed.hookSpecificOutput.hookEventName).toBe("PostToolUse");
- expect(parsed.hookSpecificOutput.additionalContext).toContain("VALIDATION");
- expect(parsed.hookSpecificOutput.additionalContext).toContain("Line 3");
- expect(parsed.hookSpecificOutput.additionalContext).toContain("@ai-sdk/openai");
-
- const meta = extractPostValidation(parsed.hookSpecificOutput);
- expect(meta.errorCount).toBe(1);
- expect(meta.warnCount).toBe(0);
- });
-
- test("emits hard skill upgrade instructions once per target skill", () => {
- const violations = [
- {
- skill: "legacy-sdk",
- line: 3,
- message: "Use the ai-sdk skill",
- severity: "error" as const,
- matchedText: "legacy",
- upgradeToSkill: "ai-sdk",
- upgradeWhy: "legacy provider rules are outdated",
- upgradeMode: "hard" as const,
- },
- {
- skill: "legacy-sdk",
- line: 7,
- message: "Repeated upgrade should dedupe",
- severity: "recommended" as const,
- matchedText: "legacy-again",
- upgradeToSkill: "ai-sdk",
- upgradeWhy: "this should not be repeated",
- upgradeMode: "soft" as const,
- },
- {
- skill: "legacy-router",
- line: 11,
- message: "Use the nextjs skill",
- severity: "warn" as const,
- matchedText: "router",
- upgradeToSkill: "nextjs",
- },
- ];
- const result = formatOutput(violations, ["legacy-sdk", "legacy-router"], "/test/file.ts");
- const parsed = JSON.parse(result);
- const ctx = parsed.hookSpecificOutput.additionalContext;
-
- expect(ctx).toContain("\n\nREQUIRED: Use the Skill tool now to load ai-sdk. Reason: legacy provider rules are outdated");
- expect(ctx).toContain('');
- expect((ctx.match(/Use the Skill tool now to load ai-sdk\./g) ?? []).length).toBe(1);
- expect((ctx.match(//g) ?? []).length).toBe(1);
- expect(ctx).toContain("\n\nUse the Skill tool now to load nextjs.");
- expect(ctx).toContain('');
- });
-
- test("output conforms to SyncHookJSONOutput schema", () => {
- const violations = [{
- skill: "ai-sdk",
- line: 1,
- message: "test error",
- severity: "error" as const,
- matchedText: "test",
- }];
- const result = formatOutput(violations, ["ai-sdk"], "/test/file.ts");
- const parsed = JSON.parse(result);
- // Must only have hookSpecificOutput at top level (no extra keys)
- const topKeys = Object.keys(parsed);
- expect(topKeys).toEqual(["hookSpecificOutput"]);
- // hookSpecificOutput must have hookEventName and additionalContext
- const hso = parsed.hookSpecificOutput;
- expect(Object.keys(hso).sort()).toEqual(["additionalContext", "hookEventName"]);
- });
- });
-
- describe("performance", () => {
- let tmpDir: string;
- let testFile: string;
-
- beforeEach(() => {
- tmpDir = join(tmpdir(), `posttooluse-perf-${Date.now()}`);
- mkdirSync(tmpDir, { recursive: true });
- testFile = join(tmpDir, "app", "api", "chat", "route.ts");
- mkdirSync(join(tmpDir, "app", "api", "chat"), { recursive: true });
- });
-
- test("hook completes within 100ms for a typical file", async () => {
- // Create a moderately sized file
- const lines = [
- `import { generateText, gateway } from 'ai';`,
- `import { openai } from '@ai-sdk/openai';`,
- ...Array.from({ length: 50 }, (_, i) => `const line${i} = "some content ${i}";`),
- `export default async function handler() {`,
- ` const result = await generateText({ model: gateway('openai/gpt-5.4'), prompt: 'Hello' });`,
- ` return result.text;`,
- `}`,
- ];
- writeFileSync(testFile, lines.join("\n"));
-
- const start = performance.now();
- const { code, stdout } = await runHook({
- tool_name: "Write",
- tool_input: { file_path: testFile },
- });
- const elapsed = performance.now() - start;
-
- expect(code).toBe(0);
- JSON.parse(stdout); // valid JSON
- // Allow generous time for process spawn overhead, but log the actual time
- // The hook itself should be well under 100ms; process spawn adds ~50-100ms
- expect(elapsed).toBeLessThan(5000); // Very generous for CI; real latency is much lower
- });
- });
-});
diff --git a/tests/pretooluse-skill-inject.test.ts b/tests/pretooluse-skill-inject.test.ts
index 5a635d1..45cc61c 100644
--- a/tests/pretooluse-skill-inject.test.ts
+++ b/tests/pretooluse-skill-inject.test.ts
@@ -2688,22 +2688,18 @@ describe("ai-sdk bash patterns match @ai-sdk/ scoped packages", () => {
});
describe("hooks.json PreToolUse config", () => {
- test("has PreToolUse matcher for Read|Edit|Write|Bash", () => {
+ test("does not auto-register the Read|Edit|Write|Bash skill injection hook", () => {
const hooks = JSON.parse(readFileSync(join(ROOT, "hooks", "hooks.json"), "utf-8"));
- expect(hooks.hooks.PreToolUse).toBeDefined();
- expect(hooks.hooks.PreToolUse.length).toBeGreaterThan(0);
-
- const matcher = hooks.hooks.PreToolUse[0].matcher;
- expect(matcher).toContain("Read");
- expect(matcher).toContain("Edit");
- expect(matcher).toContain("Write");
- expect(matcher).toContain("Bash");
- });
+ const preToolHooks = hooks.hooks.PreToolUse ?? [];
+ const skillInjectionEntry = preToolHooks.find((entry: any) =>
+ Array.isArray(entry?.hooks)
+ && entry.hooks.some((hook: any) =>
+ typeof hook?.command === "string"
+ && hook.command.includes("pretooluse-skill-inject.mjs"),
+ ),
+ );
- test("references the correct hook script", () => {
- const hooks = JSON.parse(readFileSync(join(ROOT, "hooks", "hooks.json"), "utf-8"));
- const hookCmd = hooks.hooks.PreToolUse[0].hooks[0].command;
- expect(hookCmd).toContain("pretooluse-skill-inject.mjs");
+ expect(skillInjectionEntry).toBeUndefined();
});
});
diff --git a/tests/pretooluse-subagent-spawn-observe.test.ts b/tests/pretooluse-subagent-spawn-observe.test.ts
deleted file mode 100644
index 0f1a7ba..0000000
--- a/tests/pretooluse-subagent-spawn-observe.test.ts
+++ /dev/null
@@ -1,94 +0,0 @@
-import { afterEach, describe, expect, mock, test } from "bun:test";
-import { resolve } from "node:path";
-
-const ROOT = resolve(import.meta.dirname, "..");
-const SOURCE_HOOK = resolve(ROOT, "hooks", "src", "pretooluse-subagent-spawn-observe.mts");
-const COMPILED_HOOK = resolve(ROOT, "hooks", "pretooluse-subagent-spawn-observe.mjs");
-const SOURCE_STATE = resolve(ROOT, "hooks", "src", "subagent-state.mjs");
-const COMPILED_STATE = resolve(ROOT, "hooks", "subagent-state.mjs");
-
-let appendCalls: Array<{ sessionId: string; launch: Record }> = [];
-
-function configureStateMock(modulePath: string): void {
- appendCalls = [];
- mock.module(modulePath, () => ({
- appendPendingLaunch: (sessionId: string, launch: Record) => {
- appendCalls.push({ sessionId, launch });
- },
- }));
-}
-
-async function loadVariant(kind: "source" | "compiled") {
- if (kind === "source") {
- configureStateMock(SOURCE_STATE);
- return import(`${SOURCE_HOOK}?t=${Date.now()}-${Math.random()}`);
- }
-
- configureStateMock(COMPILED_STATE);
- return import(`${COMPILED_HOOK}?t=${Date.now()}-${Math.random()}`);
-}
-
-afterEach(() => {
- mock.restore();
- delete process.env.SESSION_ID;
-});
-
-for (const kind of ["source", "compiled"] as const) {
- describe(`${kind} pretooluse-subagent-spawn-observe`, () => {
- test("returns empty JSON and does not record for non-Agent tools", async () => {
- const mod = await loadVariant(kind);
-
- expect(mod.run(JSON.stringify({
- tool_name: "Read",
- tool_input: { file_path: "/tmp/example.ts" },
- session_id: "session-non-agent",
- }))).toBe("{}");
-
- expect(appendCalls).toEqual([]);
- });
-
- test("writes a pending launch record for Agent tool input", async () => {
- const mod = await loadVariant(kind);
- const sessionId = "session-agent";
-
- expect(mod.run(JSON.stringify({
- tool_name: "Agent",
- session_id: sessionId,
- tool_input: {
- description: "Observe the failing deploy",
- prompt: "Inspect the Vercel deployment logs and summarize the first error",
- subagent_type: "Explore",
- resume: "resume-token",
- name: "observer",
- },
- }))).toBe("{}");
-
- expect(appendCalls).toHaveLength(1);
- expect(appendCalls[0]?.sessionId).toBe(sessionId);
- expect(appendCalls[0]?.launch).toMatchObject({
- description: "Observe the failing deploy",
- prompt: "Inspect the Vercel deployment logs and summarize the first error",
- subagent_type: "Explore",
- resume: "resume-token",
- name: "observer",
- });
- expect(typeof appendCalls[0]?.launch.createdAt).toBe("number");
- });
-
- test("falls back to SESSION_ID when stdin omits session_id", async () => {
- const mod = await loadVariant(kind);
- process.env.SESSION_ID = "env-session-id";
-
- expect(mod.run(JSON.stringify({
- tool_name: "Agent",
- tool_input: {
- description: "Fallback session",
- prompt: "Use env session id",
- },
- }))).toBe("{}");
-
- expect(appendCalls).toHaveLength(1);
- expect(appendCalls[0]?.sessionId).toBe("env-session-id");
- });
- });
-}
diff --git a/tests/session-end-cleanup.test.ts b/tests/session-end-cleanup.test.ts
index e3e2c08..1375046 100644
--- a/tests/session-end-cleanup.test.ts
+++ b/tests/session-end-cleanup.test.ts
@@ -31,53 +31,53 @@ async function runSessionEnd(
}
describe("session-end-cleanup", () => {
- test("removes hashed pending-launch directories for unsafe session ids", async () => {
+ test("removes hashed seen-skills directories for unsafe session ids", async () => {
const sessionId = "unsafe/session:id";
const hashedSessionId = createHash("sha256").update(sessionId).digest("hex");
- const pendingLaunchDir = join(resolve(tmpdir()), `vercel-plugin-${hashedSessionId}-pending-launches`);
+ const seenSkillsDir = join(resolve(tmpdir()), `vercel-plugin-${hashedSessionId}-seen-skills.d`);
- mkdirSync(pendingLaunchDir, { recursive: true });
- writeFileSync(join(pendingLaunchDir, "launch.json"), "{\"ok\":true}\n", "utf-8");
+ mkdirSync(seenSkillsDir, { recursive: true });
+ writeFileSync(join(seenSkillsDir, "nextjs"), "", "utf-8");
try {
- expect(existsSync(pendingLaunchDir)).toBe(true);
+ expect(existsSync(seenSkillsDir)).toBe(true);
const { code, stdout, stderr } = await runSessionEnd({ session_id: sessionId });
expect(code).toBe(0);
expect(stdout).toBe("");
expect(stderr).toBe("");
- expect(existsSync(pendingLaunchDir)).toBe(false);
+ expect(existsSync(seenSkillsDir)).toBe(false);
} finally {
- rmSync(pendingLaunchDir, { recursive: true, force: true });
+ rmSync(seenSkillsDir, { recursive: true, force: true });
}
});
- test("removes hashed pending-launch directories when only conversation_id is provided", async () => {
+ test("removes hashed seen-skills directories when only conversation_id is provided", async () => {
const conversationId = "cursor/conversation:id";
const hashedSessionId = createHash("sha256").update(conversationId).digest("hex");
- const pendingLaunchDir = join(resolve(tmpdir()), `vercel-plugin-${hashedSessionId}-pending-launches`);
+ const seenSkillsDir = join(resolve(tmpdir()), `vercel-plugin-${hashedSessionId}-seen-skills.d`);
- mkdirSync(pendingLaunchDir, { recursive: true });
- writeFileSync(join(pendingLaunchDir, "launch.json"), "{\"ok\":true}\n", "utf-8");
+ mkdirSync(seenSkillsDir, { recursive: true });
+ writeFileSync(join(seenSkillsDir, "nextjs"), "", "utf-8");
try {
- expect(existsSync(pendingLaunchDir)).toBe(true);
+ expect(existsSync(seenSkillsDir)).toBe(true);
const { code, stdout, stderr } = await runSessionEnd({ conversation_id: conversationId });
expect(code).toBe(0);
expect(stdout).toBe("");
expect(stderr).toBe("");
- expect(existsSync(pendingLaunchDir)).toBe(false);
+ expect(existsSync(seenSkillsDir)).toBe(false);
} finally {
- rmSync(pendingLaunchDir, { recursive: true, force: true });
+ rmSync(seenSkillsDir, { recursive: true, force: true });
}
});
});
describe("hooks.json wiring", () => {
- test("registers the Agent pretooluse observer hook", () => {
+ test("does not register the Agent pretooluse observer hook", () => {
const config = JSON.parse(readFileSync(HOOKS_CONFIG_PATH, "utf-8")) as {
hooks?: {
PreToolUse?: Array<{
@@ -87,15 +87,6 @@ describe("hooks.json wiring", () => {
};
};
- expect(config.hooks?.PreToolUse).toContainEqual({
- matcher: "Agent",
- hooks: [
- {
- type: "command",
- command: "node \"${CLAUDE_PLUGIN_ROOT}/hooks/pretooluse-subagent-spawn-observe.mjs\"",
- timeout: 5,
- },
- ],
- });
+ expect((config.hooks?.PreToolUse ?? []).some((entry) => entry.matcher === "Agent")).toBe(false);
});
});
diff --git a/tests/session-start-activation.test.ts b/tests/session-start-activation.test.ts
new file mode 100644
index 0000000..d0cde09
--- /dev/null
+++ b/tests/session-start-activation.test.ts
@@ -0,0 +1,106 @@
+import { afterEach, beforeEach, describe, expect, test } from "bun:test";
+import { mkdirSync, mkdtempSync, readFileSync, rmSync, writeFileSync } from "node:fs";
+import { tmpdir } from "node:os";
+import { join, resolve } from "node:path";
+import { readSessionFile } from "../hooks/src/hook-env.mts";
+
+const ROOT = resolve(import.meta.dirname, "..");
+const PROFILER = join(ROOT, "hooks", "session-start-profiler.mjs");
+const NODE_BIN = Bun.which("node") || "node";
+
+let tempDir: string;
+let envFile: string;
+let testSessionId: string;
+
+async function runProfiler(projectRoot: string): Promise<{
+ code: number;
+ stdout: string;
+ stderr: string;
+}> {
+ const proc = Bun.spawn([NODE_BIN, PROFILER], {
+ stdin: "pipe",
+ stdout: "pipe",
+ stderr: "pipe",
+ env: {
+ ...(process.env as Record),
+ CLAUDE_ENV_FILE: envFile,
+ CLAUDE_PROJECT_ROOT: projectRoot,
+ },
+ });
+
+ proc.stdin.write(JSON.stringify({ session_id: testSessionId }));
+ proc.stdin.end();
+
+ const code = await proc.exited;
+ const stdout = await new Response(proc.stdout).text();
+ const stderr = await new Response(proc.stderr).text();
+ return { code, stdout, stderr };
+}
+
+beforeEach(() => {
+ tempDir = mkdtempSync(join(tmpdir(), "session-start-activation-"));
+ envFile = join(tempDir, "claude.env");
+ writeFileSync(envFile, "", "utf-8");
+ testSessionId = `session-start-activation-${Date.now()}-${Math.random().toString(36).slice(2)}`;
+});
+
+afterEach(() => {
+ rmSync(tempDir, { recursive: true, force: true });
+});
+
+describe("session-start activation", () => {
+ test("greenfield directories still activate", async () => {
+ const projectDir = join(tempDir, "greenfield");
+ mkdirSync(projectDir);
+
+ const result = await runProfiler(projectDir);
+
+ expect(result.code).toBe(0);
+ expect(result.stdout).toContain("greenfield project");
+ expect(readSessionFile(testSessionId, "greenfield")).toBe("true");
+ expect(readSessionFile(testSessionId, "likely-skills")).toContain("nextjs");
+ });
+
+ test("non-empty unrelated directories skip activation", async () => {
+ const projectDir = join(tempDir, "plain-project");
+ mkdirSync(projectDir);
+ writeFileSync(join(projectDir, "README.md"), "# Plain project");
+
+ const result = await runProfiler(projectDir);
+
+ expect(result.code).toBe(0);
+ expect(result.stdout.trim()).toBe("");
+ expect(readFileSync(envFile, "utf-8")).toBe("");
+ expect(readSessionFile(testSessionId, "greenfield")).toBe("");
+ expect(readSessionFile(testSessionId, "likely-skills")).toBe("");
+ });
+
+ test("detected vercel projects still activate", async () => {
+ const projectDir = join(tempDir, "next-project");
+ mkdirSync(projectDir);
+ writeFileSync(join(projectDir, "next.config.ts"), "export default {};");
+
+ const result = await runProfiler(projectDir);
+
+ expect(result.code).toBe(0);
+ expect(readSessionFile(testSessionId, "likely-skills")).toContain("nextjs");
+ });
+
+ test("package.json vercel signals are enough to activate", async () => {
+ const projectDir = join(tempDir, "pkg-signals");
+ mkdirSync(projectDir);
+ writeFileSync(
+ join(projectDir, "package.json"),
+ JSON.stringify({
+ dependencies: {
+ "@vercel/blob": "^1.0.0",
+ },
+ }),
+ );
+
+ const result = await runProfiler(projectDir);
+
+ expect(result.code).toBe(0);
+ expect(readSessionFile(testSessionId, "likely-skills")).toContain("vercel-storage");
+ });
+});
diff --git a/tests/session-start-profiler.test.ts b/tests/session-start-profiler.test.ts
index 905b3aa..061f168 100644
--- a/tests/session-start-profiler.test.ts
+++ b/tests/session-start-profiler.test.ts
@@ -127,6 +127,23 @@ describe("session-start-profiler", () => {
expect(skills).not.toContain("observability");
});
+ test("skips non-empty non-vercel projects", async () => {
+ const projectDir = join(tempDir, "plain-project");
+ mkdirSync(projectDir);
+ writeFileSync(join(projectDir, "README.md"), "# Plain project");
+
+ const result = await runProfiler({
+ CLAUDE_ENV_FILE: envFile,
+ CLAUDE_PROJECT_ROOT: projectDir,
+ });
+
+ expect(result.code).toBe(0);
+ expect(result.stdout.trim()).toBe("");
+ expect(readGreenfieldState()).toBe("");
+ expect(readSessionFile(testSessionId, "likely-skills")).toBe("");
+ expect(readFileSync(envFile, "utf-8")).toBe("");
+ });
+
test("detects Next.js project via next.config.ts", async () => {
const projectDir = join(tempDir, "nextjs-project");
mkdirSync(projectDir);
diff --git a/tests/subagent-lifecycle-integration.test.ts b/tests/subagent-lifecycle-integration.test.ts
deleted file mode 100644
index 2736ee6..0000000
--- a/tests/subagent-lifecycle-integration.test.ts
+++ /dev/null
@@ -1,342 +0,0 @@
-import { describe, test, expect, beforeEach, afterEach } from "bun:test";
-import { readFileSync, rmSync, existsSync } from "node:fs";
-import { join, resolve } from "node:path";
-import { tmpdir } from "node:os";
-import {
- listSessionKeys,
- removeSessionClaimDir,
-} from "../hooks/src/hook-env.mts";
-
-const ROOT = resolve(import.meta.dirname, "..");
-const BOOTSTRAP_SCRIPT = join(ROOT, "hooks", "subagent-start-bootstrap.mjs");
-const PRETOOLUSE_SCRIPT = join(ROOT, "hooks", "pretooluse-skill-inject.mjs");
-const STOP_SCRIPT = join(ROOT, "hooks", "subagent-stop-sync.mjs");
-const UNLIMITED_BUDGET = "999999";
-
-let testSession: string;
-const cleanupPaths: string[] = [];
-
-beforeEach(() => {
- testSession = `lifecycle-${Date.now()}-${Math.random().toString(36).slice(2)}`;
-});
-
-afterEach(() => {
- // Clean up dedup claim dirs (both unscoped and agent-scoped) and ledger files
- removeSessionClaimDir(testSession, "seen-skills");
- // Clean up scoped claim dirs used by tests
- for (const agentId of ["explore-agent-1", "explore-1", "plan-1", "gp-agent-2", "explore-concurrent-a", "explore-concurrent-b"]) {
- removeSessionClaimDir(testSession, "seen-skills", agentId);
- }
- const ledger = join(
- tmpdir(),
- `vercel-plugin-${testSession}-subagent-ledger.jsonl`,
- );
- if (existsSync(ledger)) {
- rmSync(ledger, { force: true });
- }
- for (const p of cleanupPaths) {
- try {
- rmSync(p, { recursive: true, force: true });
- } catch {}
- }
- cleanupPaths.length = 0;
-});
-
-// ---------------------------------------------------------------------------
-// Helpers
-// ---------------------------------------------------------------------------
-
-async function runHook(
- script: string,
- input: Record,
- env: Record,
-): Promise<{ code: number; stdout: string; stderr: string }> {
- const payload = JSON.stringify({ session_id: testSession, ...input });
-
- const proc = Bun.spawn(["node", script], {
- stdin: "pipe",
- stdout: "pipe",
- stderr: "pipe",
- env: {
- ...process.env,
- VERCEL_PLUGIN_LOG_LEVEL: "off",
- ...env,
- },
- });
-
- proc.stdin.write(payload);
- proc.stdin.end();
-
- const code = await proc.exited;
- const stdout = await new Response(proc.stdout).text();
- const stderr = await new Response(proc.stderr).text();
- return { code, stdout, stderr };
-}
-
-function runBootstrap(
- input: Record,
- env: Record,
-) {
- return runHook(BOOTSTRAP_SCRIPT, input, env);
-}
-
-function runPreToolUse(
- input: Record,
- env: Record,
-) {
- return runHook(PRETOOLUSE_SCRIPT, input, {
- VERCEL_PLUGIN_INJECTION_BUDGET: UNLIMITED_BUDGET,
- ...env,
- });
-}
-
-function runStop(
- input: Record,
- env: Record,
-) {
- return runHook(STOP_SCRIPT, input, env);
-}
-
-function parseInjectedSkills(stdout: string): string[] {
- if (!stdout.trim()) return [];
- const parsed = JSON.parse(stdout);
- const ctx = parsed?.hookSpecificOutput?.additionalContext || "";
- const match = ctx.match(//);
- const si = match ? JSON.parse(match[1]) : {};
- return Array.isArray(si.injectedSkills) ? si.injectedSkills : [];
-}
-
-function readLedger(
- sessionId: string,
-): Array