From ba9e6f3316984a475cbc1b99363f35f4e1c1c059 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Sat, 28 Mar 2026 22:28:32 -0700 Subject: [PATCH 1/2] =?UTF-8?q?feat:=20add=20Kit=20system=20=E2=80=94=20sh?= =?UTF-8?q?areable=20bundles=20of=20skills,=20MCP=20servers,=20and=20instr?= =?UTF-8?q?uctions?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduces the Kit extension system that enables anyone — vendors, solution architects, team leads, individual engineers — to create and distribute shareable development setups. ## What's included **Core runtime** (`packages/opencode/src/kit/`): - `Kit` namespace with Zod schemas, state management, YAML loading - Trust tiers (`built-in`, `verified`, `community`) - Skill packs with activation modes (`always`, `detect`, `manual`) - Activate/deactivate lifecycle with full cleanup **11 CLI commands** (`packages/opencode/src/cli/cmd/kit.ts`): - `kit list`, `kit create`, `kit show`, `kit install`, `kit remove` - `kit activate` — one command: installs skills, configures MCP, enables - `kit deactivate` — clean removal (instructions + MCP config + active-kits) - `kit detect`, `kit search`, `kit status`, `kit validate` **TUI startup nudge** (`packages/opencode/src/cli/cmd/tui/thread.ts`): - Non-blocking detection on TUI startup - Shows one-line suggestion when matching kits found **JSONC-preserving config writes**: - Uses `jsonc-parser` `modify`/`applyEdits` to preserve user comments - MCP servers added on activate, removed on deactivate **Documentation** (`docs/`): - User guide: `docs/docs/configure/kits.md` (CLI reference, locations, tiers) - Author guide: `docs/docs/develop/kits.md` (full schema, tutorial, examples) - Ecosystem plan: `docs/PARTNER_ECOSYSTEM_PLAN.md` (strategy + simulation results) - Roadmap with planned features (`kit switch`, inheritance, `kit enforce`) ## Testing - 60/60 automated E2E tests passing (name validation, activate/deactivate lifecycle, MCP merge, JSONC 
preservation, detect, validate, install) - 10 stakeholder simulations across 5 scenarios (Snowflake, Dagster, dbt Labs, Airbyte, Healthcare, MSP consulting, OSS contributor, self-serve, enterprise) - 29 bugs found and fixed across 3 review rounds ## External - Kit content lives in `AltimateAI/data-engineering-skills` (merged PR #9) - Registry at `data-engineering-skills/registry.json` with 1 real entry - `dbt-snowflake` kit: 9 skills + dbt MCP server Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/PARTNER_ECOSYSTEM_PLAN.md | 939 +++++++++++ docs/docs/configure/index.md | 8 + docs/docs/configure/kits.md | 192 +++ docs/docs/develop/ecosystem.md | 18 + docs/docs/develop/kits.md | 422 +++++ docs/mkdocs.yml | 2 + .../opencode/src/altimate/telemetry/index.ts | 36 + packages/opencode/src/cli/cmd/kit.ts | 1366 +++++++++++++++++ packages/opencode/src/cli/cmd/tui/thread.ts | 25 + packages/opencode/src/config/config.ts | 3 + packages/opencode/src/index.ts | 6 + packages/opencode/src/kit/index.ts | 3 + packages/opencode/src/kit/kit.ts | 430 ++++++ 13 files changed, 3450 insertions(+) create mode 100644 docs/PARTNER_ECOSYSTEM_PLAN.md create mode 100644 docs/docs/configure/kits.md create mode 100644 docs/docs/develop/kits.md create mode 100644 packages/opencode/src/cli/cmd/kit.ts create mode 100644 packages/opencode/src/kit/index.ts create mode 100644 packages/opencode/src/kit/kit.ts diff --git a/docs/PARTNER_ECOSYSTEM_PLAN.md b/docs/PARTNER_ECOSYSTEM_PLAN.md new file mode 100644 index 0000000000..73546710b2 --- /dev/null +++ b/docs/PARTNER_ECOSYSTEM_PLAN.md @@ -0,0 +1,939 @@ +# Altimate Code — Extension Ecosystem Plan + +> **Purpose:** Enable anyone — vendors, solution architects, team leads, individual engineers — to extend Altimate Code with kits that bundle skills, MCP servers, and instructions. 
+> +> **Date:** 2026-03-28 | **Status:** Validated through 5 scenario simulations (12 personas) +> +> **Key rename:** "Recipe" → "Kit" (differentiation from Goose, clearer mental model) + +### Simulation Results (2026-03-28) +| Scenario | Score | Key Finding | +|----------|-------|-------------| +| Snowflake (Large Enterprise) | 5/10 | Demo-ready core, 5 deal blockers | +| Dagster (Growth Startup) | 6/10 | Would partner conditionally | +| Fortune 500 Bank (Enterprise) | 3/10 | Missing enforcement, use AGENTS.md today | +| Solo Consultant (SA) | 5/10 | Best natural fit, needs `kit switch` + cleanup | +| Series A Self-Serve | 3/10 | Nobody discovers kit without being told | + +**Universal finding:** Authoring experience is good. Single-developer workflow works. Discovery and multi-person story are broken. Auto-detect on startup is the #1 priority. + +--- + +## Table of Contents + +1. [Executive Summary](#1-executive-summary) +2. [Industry Landscape — How Others Do It](#2-industry-landscape) +3. [Our Extensibility Architecture](#3-our-extensibility-architecture) +4. [The Three-Layer Partner Model](#4-the-three-layer-partner-model) +5. [Layer 1: Agent Skills (SKILL.md)](#5-layer-1-agent-skills) +6. [Layer 2: MCP Servers](#6-layer-2-mcp-servers) +7. [Layer 3: Plugins (Deep Integration)](#7-layer-3-plugins) +8. [Kits: The Distribution Unit](#8-kits-the-distribution-unit) +9. [data-engineering-skills: The Open-Source Foundation](#9-data-engineering-skills-the-open-source-foundation) +10. [Onboarding Playbook](#10-onboarding-playbook) +11. [What We Need to Build](#11-what-we-need-to-build) +12. [Competitive Positioning](#12-competitive-positioning) +13. [Appendix: Research Sources](#13-appendix) + +--- + +## 1. 
Executive Summary + +The data engineering agent space is converging on **three complementary extension layers**: + +| Layer | What It Does | Portability | Effort to Build | Example | +|-------|-------------|-------------|-----------------|---------| +| **Agent Skills** | Teaches the AI *how to think* about tasks | Universal (30+ products) | Low (markdown) | "How to debug a dbt model" | +| **MCP Servers** | Gives the AI *tools to execute* tasks | Universal (any MCP client) | Medium (code) | `dbt build`, `dagster materialize` | +| **Plugins** | Deep platform integration (auth, UI, hooks) | Altimate-specific | High (TypeScript) | Custom auth flow, tool interception | + +**Our strategy:** Make Altimate Code the best host for data engineering extensions by providing all three layers, with `AltimateAI/data-engineering-skills` as the open-source foundation that any vendor can contribute to. + +**Why partners should care:** +- Skills authored once work across Claude Code, Cursor, VS Code Copilot, Gemini CLI, OpenCode, and 25+ other agents (via the [agentskills.io](https://agentskills.io) open standard) +- MCP servers work across Goose, Claude Desktop, Continue.dev, Cline, and every MCP-compatible client +- Partners get distribution to every data engineer using AI coding agents, not just Altimate Code users + +--- + +## 2. Industry Landscape + +### 2.1 How Goose (Block) Does It + +Goose made the boldest architectural decision: **Extensions ARE MCP servers.** No proprietary format. 
+ +**Key patterns worth adopting:** + +| Pattern | How Goose Does It | Our Equivalent | +|---------|-------------------|----------------| +| Extension = MCP server | Any MCP server is auto-discovered | We support this via `config.mcp` | +| **Recipes** | YAML bundles: extensions + prompts + settings + parameters | **Kits** (KIT.yaml) — our equivalent | +| Deep links | `goose://extension?cmd=...` one-click install | Not yet | +| Extension directory | Curated browse page (70+ servers) | Not yet | +| Custom distros | Full white-label with bundled extensions | Possible via our config system | +| Subagent composition | Recipes spawn parallel sub-agents | We have agents but no kit system yet | +| Malware scanning | Auto-scan before extension activation | Not yet | + +**Goose's real partner integrations:** +- **DataHub + Block:** DataHub MCP server for metadata intelligence +- **OpenMetadata:** Published a Goose Recipe (not just extension) +- **Dagster:** Ships `dagster-mcp` that works with any MCP client including Goose +- **Docker:** Containerized extension execution + +**Goose's gaps (our opportunity):** +- No formal partner program or certification +- No marketplace economics (no paid extensions) +- No extension quality metrics or ratings +- No automated testing framework for extensions +- Extension discovery relies on external directories + +### 2.2 How OpenCode Upstream Does It + +OpenCode (our upstream fork) has a mature plugin system with 50+ community plugins: + +**Plugin hooks (20+ interception points):** +``` +auth, event, config, chat.message, chat.params, chat.headers, +permission.ask, command.execute.before, tool.execute.before, +tool.execute.after, tool.definition, shell.env, +experimental.chat.system.transform, experimental.session.compacting +``` + +**Plugin distribution:** npm packages (prefix `opencode-`) or local files in `.opencode/plugins/` + +**Skill loading hierarchy (8 sources):** +1. Built-in (embedded at build time) +2. 
Filesystem builtin (`~/.altimate/builtin/`) +3. External directories (`.claude/skills/`, `.agents/skills/`) +4. Global home-directory skills +5. Project-level skills (walked up directory tree) +6. `.opencode/skill/` directories +7. Config `skills.paths` (additional directories) +8. Config `skills.urls` (remote — fetches `index.json` then downloads files) + +**Key insight:** We already inherit all of this. The question is what we build ON TOP of it. + +### 2.3 Industry-Wide Convergence + +| Product | Skills | MCP | Plugins | Marketplace | +|---------|--------|-----|---------|-------------| +| Claude Code | SKILL.md | Yes | Yes (.claude-plugin) | Yes (official) | +| Goose | No | Yes (primary) | No (MCP only) | Browse page | +| Continue.dev | Rules | Yes (primary) | Config-based | Continue Hub | +| Cline | SKILL.md | Yes | VS Code ext | VS Code marketplace | +| Cursor | Rules | Yes | No | No | +| Codex CLI | SKILL.md | Planned | No | No | +| Gemini CLI | SKILL.md | Yes | No | No | +| **Altimate Code** | SKILL.md | Yes | Yes (hooks) | **Not yet** | + +**The market signal is clear:** MCP for tools, Skills for knowledge, Plugins for deep integration. All three matter. + +### 2.4 Data Vendor MCP Servers (Already Shipping) + +| Vendor | MCP Server | Tools | Maturity | +|--------|-----------|-------|----------| +| **dbt** | `dbt-mcp` | 58 tools (SQL, Semantic Layer, Discovery, Admin, CLI, codegen, docs) | Production | +| **Dagster** | `dg[mcp]` | CLI wrapper, scaffold, YAML config, code quality | Production | +| **Airbyte** | 3 servers: PyAirbyte MCP, Knowledge MCP, Connector Builder MCP | Pipeline generation, docs search, 600+ connectors | Production | +| **Snowflake** | Cortex MCP | Query, schema, governance | Beta | +| **DataHub** | DataHub MCP | Metadata, lineage, governance | Production | +| **OpenMetadata** | OpenMetadata MCP | Governance, quality, profiling | Production | + +**Critical realization:** These vendors already ship MCP servers. 
Our job is to make Altimate Code the BEST host for these servers by adding data-engineering-specific skills on top. + +--- + +## 3. Our Extensibility Architecture + +### 3.1 What We Already Have + +``` +┌─────────────────────────────────────────────────────────┐ +│ Altimate Code │ +│ │ +│ ┌──────────┐ ┌──────────┐ ┌──────────┐ ┌─────────┐ │ +│ │ Skills │ │ MCP │ │ Plugins │ │ Tools │ │ +│ │ (SKILL.md)│ │ Servers │ │ (Hooks) │ │ (Zod) │ │ +│ └────┬─────┘ └────┬─────┘ └────┬─────┘ └────┬────┘ │ +│ │ │ │ │ │ +│ ┌────┴──────────────┴──────────────┴──────────────┴───┐ │ +│ │ Agent Runtime (LLM Loop) │ │ +│ └─────────────────────┬───────────────────────────────┘ │ +│ │ │ +│ ┌─────────────────────┴───────────────────────────────┐ │ +│ │ SDK (@altimate/cli-sdk) — REST API + Types │ │ +│ └─────────────────────────────────────────────────────┘ │ +└──────────────────────────────────────────────────────────┘ +``` + +**Current extension points:** + +| Extension Point | Location | Partner Access | Gap | +|----------------|----------|---------------|-----| +| Skills (SKILL.md) | `packages/opencode/src/skill/` | Git repos, URLs, local dirs | No registry, no versioning | +| MCP Servers | `packages/opencode/src/mcp/` | Config YAML, auto-discovery | No bundled data-eng servers | +| Plugins (npm) | `packages/plugin/` | npm packages, local files | No marketplace | +| Tools (Zod) | `packages/opencode/src/tool/` | Config dirs, plugins | No external tool packaging | +| Providers | `packages/opencode/src/provider/` | Config, custom loaders | No plugin-based registration | +| SDK | `packages/sdk/js/` | REST API, OpenAPI types | No WebSocket, subprocess only | + +### 3.2 Config-Level Extension + +Partners can configure extensions via `opencode.jsonc` or `.altimate-code/`: + +```jsonc +{ + // Skills from partner repos + "skills": { + "paths": ["./vendor-skills/dagster/"], + "urls": ["https://raw.githubusercontent.com/DagsterHQ/dagster-skills/main/"] + }, + + // Partner MCP servers + "mcp": { 
+ "dagster": { + "type": "stdio", + "command": ["uvx", "dg", "mcp", "serve"], + "env": { "DAGSTER_HOME": "/path/to/dagster" } + }, + "dbt": { + "type": "stdio", + "command": ["uvx", "dbt-mcp"], + "env": { "DBT_PROJECT_DIR": "./", "DBT_PROFILES_DIR": "~/.dbt" } + } + }, + + // Partner plugins + "plugin": ["@dagster/altimate-plugin@latest"] +} +``` + +--- + +## 4. The Three-Layer Partner Model + +We propose a **progressive complexity** model where partners choose their integration depth: + +``` + Effort ──────────────────────► + + ┌─────────────────────────────────────────────────────┐ + │ │ + │ Layer 1 Layer 2 Layer 3 │ + │ ──────── ──────── ──────── │ + │ │ + │ SKILL.md ──► MCP Server ──► Plugin │ + │ (Markdown) (Python/TS) (TypeScript) │ + │ │ + │ Teaches HOW Provides TOOLS Deep platform │ + │ to approach to execute integration │ + │ tasks tasks (auth, UI, hooks)│ + │ │ + │ ~1 day ~1 week ~2-4 weeks │ + │ │ + │ Works in 30+ Works in any Altimate-specific│ + │ AI agents MCP client but most powerful│ + │ │ + └─────────────────────────────────────────────────────┘ +``` + +Most partners start at Layer 1, add Layer 2 if they have an API/CLI, and only reach Layer 3 for deep integrations. + +--- + +## 5. Layer 1: Agent Skills (SKILL.md) + +### 5.1 Why Skills Matter + +Skills are the **highest-leverage, lowest-effort** extension point. They encode expert knowledge about how to use a vendor's tool. 
+ +**Without a skill:** "Hey Claude, create a Dagster asset" → generic, possibly wrong output +**With a skill:** "Hey Claude, create a Dagster asset" → follows Dagster's opinionated patterns, uses `dg` CLI, validates with type checking + +### 5.2 Skill Authoring Guide for Partners + +**File structure:** +``` +dagster-skills/ +├── skills/ +│ ├── dagster/ +│ │ ├── creating-dagster-assets/ +│ │ │ └── SKILL.md +│ │ ├── debugging-dagster-runs/ +│ │ │ └── SKILL.md +│ │ ├── scheduling-dagster-jobs/ +│ │ │ └── SKILL.md +│ │ └── testing-dagster-assets/ +│ │ └── SKILL.md +│ └── index.json # For remote discovery +├── .claude-plugin/ +│ └── marketplace.json # For Claude Code marketplace +├── CONTRIBUTING.md +└── README.md +``` + +**SKILL.md format:** +```yaml +--- +name: creating-dagster-assets +description: | + Creates Dagster assets following project conventions. Use when: + (1) Creating new software-defined assets + (2) Task mentions "create", "build", "add" a Dagster asset + (3) Working with Dagster's asset-based orchestration +--- + +# Creating Dagster Assets + +**Read project structure before writing. Validate after creation.** + +## Critical Rules +1. ALWAYS use `@asset` decorator, never raw `@op` for new work +2. ALWAYS define `AssetSpec` with proper metadata +3. ALWAYS add asset checks for data quality +4. Use `dg` CLI for scaffolding when available + +## Workflow +1. **Explore** — Read existing assets in the project for conventions +2. **Scaffold** — Use `dg scaffold asset` if `dg` CLI available +3. **Implement** — Write the asset following project patterns +4. **Test** — Run `dagster asset materialize` to verify +5. 
**Validate** — Check asset appears in Dagster UI lineage graph + +## Anti-Patterns +- Do NOT use `@op` + `@job` for new data assets (legacy pattern) +- Do NOT hardcode partition definitions (use config) +- Do NOT skip `@asset_check` for critical data assets +``` + +**`index.json` format (for remote discovery via `skills.urls`):** +```json +{ + "skills": [ + { + "name": "creating-dagster-assets", + "description": "Creates Dagster assets following best practices", + "files": [ + "skills/dagster/creating-dagster-assets/SKILL.md" + ] + } + ] +} +``` + +**`marketplace.json` format (for Claude Code plugin marketplace):** +```json +{ + "name": "dagster-skills", + "owner": { "name": "Dagster Labs", "email": "oss@dagster.io" }, + "metadata": { + "description": "Expert skills for Dagster asset orchestration", + "version": "1.0.0" + }, + "plugins": [ + { + "name": "dagster-core-skills", + "description": "Core Dagster development skills", + "source": "./", + "skills": [ + "./skills/dagster/creating-dagster-assets", + "./skills/dagster/debugging-dagster-runs", + "./skills/dagster/scheduling-dagster-jobs", + "./skills/dagster/testing-dagster-assets" + ] + } + ] +} +``` + +### 5.3 Skill Quality Checklist + +| Criterion | Required | Description | +|-----------|----------|-------------| +| Actionable workflow | Yes | Step-by-step, not reference docs | +| Read-before-write | Yes | Always explore existing patterns first | +| Verification step | Yes | How to confirm the work is correct | +| Anti-patterns section | Recommended | Common mistakes to avoid | +| Tool references | Recommended | Which MCP tools to use if available | +| Benchmark tested | Recommended | Measured improvement on real tasks | + +### 5.4 Portability + +Skills authored for Altimate Code automatically work in: +- Claude Code (native SKILL.md support) +- Cursor (via rules import) +- VS Code Copilot (via agent skills) +- Gemini CLI (SKILL.md compatible) +- Codex CLI (SKILL.md compatible) +- Any product supporting the 
[agentskills.io](https://agentskills.io) standard + +This is the **key selling point for partners**: write once, distribute everywhere. + +--- + +## 6. Layer 2: MCP Servers + +### 6.1 Why MCP Servers + +MCP servers give the AI actual tools to call. While skills teach *how to think*, MCP servers provide *ability to act*. + +**The combination is powerful:** +- Skill says: "Run `dbt build --select model_name` to verify your changes" +- MCP server provides: the `dbt_build` tool that actually executes it + +### 6.2 What Partners Already Have + +Most data vendors already ship MCP servers: + +**dbt (58 tools):** +``` +dbt_build, dbt_run, dbt_test, dbt_compile, dbt_parse, +semantic_layer_query, discovery_api_query, admin_api_*, +code_generate_model, docs_search, ... +``` + +**Dagster:** +``` +dg scaffold, dg asset materialize, dg check, +pipeline status, run logs, sensor management, ... +``` + +**Airbyte:** +``` +create_pipeline, list_connectors, sync_connection, +search_docs, build_connector, ... +``` + +### 6.3 MCP Server Integration Guide for Partners + +**Option A: Partner publishes MCP server, we document the config** + +The partner publishes their MCP server to PyPI/npm. 
We add documentation and a recommended configuration: + +```jsonc +// Recommended config for Altimate Code users +{ + "mcp": { + "dagster": { + "type": "stdio", + "command": ["uvx", "dg", "mcp", "serve"], + "env": { + "DAGSTER_HOME": "${DAGSTER_HOME}" + } + } + } +} +``` + +**Option B: Bundle as part of a plugin (recommended for deep integration)** + +The partner's plugin includes `.mcp.json` that auto-configures their MCP server: + +```json +// .mcp.json inside the plugin package +{ + "mcpServers": { + "dagster": { + "type": "stdio", + "command": ["uvx", "dg", "mcp", "serve"], + "description": "Dagster asset orchestration" + } + } +} +``` + +**Option C: Altimate Code ships pre-configured connections** + +For strategic partners, we bundle MCP server configs that auto-detect the tool: +- Detect `dbt_project.yml` → suggest enabling dbt MCP +- Detect `dagster.yaml` → suggest enabling Dagster MCP +- Detect `airbyte/` directory → suggest enabling Airbyte MCP + +### 6.4 MCP Server Quality Requirements + +| Criterion | Required | Description | +|-----------|----------|-------------| +| Tool descriptions | Yes | Clear, actionable descriptions for each tool | +| Error messages | Yes | Structured errors the LLM can reason about | +| Timeout handling | Yes | Graceful handling of long-running operations | +| Auth documentation | Yes | Clear setup instructions for API keys/tokens | +| < 20 tools exposed | Recommended | Semantic Kernel research shows LLMs degrade above 20 | +| Tool filtering | Recommended | Support `available_tools` to limit exposed surface | + +--- + +## 7. 
Layer 3: Plugins (Deep Integration) + +### 7.1 When Partners Need Plugins + +Plugins are for partners who need to: +- Add custom authentication flows (OAuth with their cloud service) +- Intercept and modify tool execution (add warehouse-specific context) +- Inject system prompts (add vendor-specific instructions) +- Modify chat parameters (adjust for their use case) +- Add custom tools with complex logic + +### 7.2 Plugin Interface + +```typescript +import type { Plugin, PluginInput, Hooks, ToolDefinition } from "@altimate/cli-plugin" + +const dagsterPlugin: Plugin = async (input: PluginInput): Promise => { + const { client, project, directory, $ } = input + + return { + // Add custom tools + tool: { + "dagster.materialize": { + description: "Materialize a Dagster asset", + parameters: z.object({ + asset_key: z.string().describe("The asset key to materialize"), + partition: z.string().optional(), + }), + execute: async (args) => { + const result = await $`dg asset materialize ${args.asset_key}` + return { title: "Materialized", output: result.stdout, metadata: {} } + } + } + }, + + // Custom auth flow + auth: { + match: (provider) => provider.id === "dagster-cloud", + login: async () => { /* OAuth flow */ }, + logout: async () => { /* Cleanup */ }, + }, + + // Intercept tool execution + "tool.execute.before": async (input, output) => { + // Add Dagster context to SQL tools + if (input.toolID.startsWith("sql.")) { + output.args = { ...output.args, context: "dagster-managed" } + } + }, + + // Inject system prompt + "experimental.chat.system.transform": async (input, output) => { + output.system += "\nThis project uses Dagster for orchestration. Prefer asset-based patterns." 
+ }, + + // React to events + event: async ({ event }) => { + if (event.type === "session.start") { + // Detect Dagster project and auto-configure + } + } + } +} + +export default dagsterPlugin +``` + +### 7.3 Available Hook Points + +| Hook | When It Fires | Partner Use Case | +|------|--------------|-----------------| +| `auth` | Authentication needed | OAuth with vendor cloud | +| `event` | Any system event | Project detection, telemetry | +| `config` | Config loaded | Inject vendor-specific defaults | +| `chat.message` | Message received | Message preprocessing | +| `chat.params` | Before LLM call | Adjust temperature, model | +| `chat.headers` | Before LLM call | Add custom headers | +| `permission.ask` | Permission requested | Auto-approve vendor tools | +| `command.execute.before` | Before command runs | Modify command | +| `tool.execute.before` | Before tool runs | Modify tool arguments | +| `tool.execute.after` | After tool runs | Process/enrich output | +| `tool.definition` | Tool registered | Modify tool descriptions | +| `shell.env` | Shell command runs | Inject env vars | +| `experimental.chat.system.transform` | System prompt built | Add vendor context | +| `experimental.session.compacting` | Context compaction | Preserve vendor state | + +### 7.4 Plugin Distribution + +```bash +# Published to npm +npm publish @dagster/altimate-plugin + +# Users install via config +# opencode.jsonc: +{ + "plugin": ["@dagster/altimate-plugin@latest"] +} + +# Or via CLI +altimate-code plugin install @dagster/altimate-plugin +``` + +### 7.5 Plugin Package Structure + +``` +@dagster/altimate-plugin/ +├── package.json +│ { +│ "name": "@dagster/altimate-plugin", +│ "version": "1.0.0", +│ "main": "./dist/index.js", +│ "peerDependencies": { +│ "@altimate/cli-plugin": "^1.2.0" +│ } +│ } +├── src/ +│ └── index.ts # Default export: Plugin function +├── skills/ # Bundled skills (optional) +│ └── dagster/ +│ └── creating-assets/SKILL.md +├── .mcp.json # Bundled MCP config 
(optional) +└── README.md +``` + +--- + +## 8. Kits: The Distribution Unit + +### 8.1 The Missing Piece + +Goose's most innovative pattern is **Recipes** — YAML files that bundle extensions + prompts + settings into shareable workflows. We should adopt this concept (renamed to **Kits** for differentiation). + +**Why kits matter for partners:** +- A Dagster skill alone is useful. A Dagster skill + Dagster MCP server + curated prompt + recommended settings = a **complete workflow**. +- Kits are the unit of distribution that partners can share with their community. + +### 8.2 Proposed Kit Format + +```yaml +# dagster-asset-development/KIT.yaml +name: dagster-asset-development +version: "1.0" +description: "Complete workflow for building Dagster assets with AI assistance" + +# Skills to activate +skills: + - source: "github:DagsterHQ/dagster-skills" + select: ["creating-dagster-assets", "testing-dagster-assets"] + +# MCP servers to enable +mcp: + dagster: + type: stdio + command: ["uvx", "dg", "mcp", "serve"] + env_keys: ["DAGSTER_HOME"] + +# Plugin to install (optional) +plugins: + - "@dagster/altimate-plugin@^1.0" + +# System instructions added to every conversation +instructions: | + This project uses Dagster for data orchestration. + Always prefer asset-based patterns over op/job patterns. + Use the `dg` CLI for scaffolding and validation. 
+ +# Parameters the user must provide +parameters: + - key: dagster_home + description: "Path to your Dagster project" + required: true + env: DAGSTER_HOME + +# Recommended settings +settings: + tools: + dagster.materialize: true + dagster.check: true +``` + +### 8.3 Kit Installation + +```bash +# From URL +altimate-code kit install https://dagster.io/kits/asset-development + +# From GitHub +altimate-code kit install DagsterHQ/dagster-kits/asset-development + +# One-liner deep link (for docs/blog posts) +altimate-code://kit?url=https://dagster.io/kits/asset-development +``` + +### 8.4 Kit as the Partner Onboarding Unit + +When a partner says "I want my tool to work with Altimate Code," the deliverable is a kit: +1. Partner writes skills (Layer 1) — 1 day +2. Partner already has MCP server (Layer 2) — 0 days (usually exists) +3. Partner bundles into kit — 1 hour +4. Kit goes into their docs: "Use Dagster with AI → install this kit" + +--- + +## 9. data-engineering-skills: The Open-Source Foundation + +### 9.1 Current State + +**Repo:** [AltimateAI/data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) (73 stars, MIT licensed) + +**Current skills (10):** +| Vendor | Skills | Benchmark Impact | +|--------|--------|-----------------| +| dbt | 7 (create, debug, test, document, migrate, refactor, incremental) | +7% on ADE-bench (46.5% → 53%) | +| Snowflake | 3 (find expensive queries, optimize by ID, optimize by text) | 84% pass on TPC-H 1TB | + +**Already uses Claude Code plugin format** (`.claude-plugin/marketplace.json`) + +### 9.2 Strategy: Make It the Central Hub + +Transform `data-engineering-skills` from "our skills repo" to "the community skills repo for data engineering": + +``` +data-engineering-skills/ +├── skills/ +│ ├── dbt/ # ✅ Exists (7 skills) +│ ├── snowflake/ # ✅ Exists (3 skills) +│ ├── dagster/ # 🆕 Partner-contributed +│ ├── airbyte/ # 🆕 Partner-contributed +│ ├── fivetran/ # 🆕 Partner-contributed +│ ├── airflow/ # 🆕 
Community-contributed +│ ├── spark/ # 🆕 Community-contributed +│ ├── bigquery/ # 🆕 Community-contributed +│ ├── databricks/ # 🆕 Community-contributed +│ └── great-expectations/ # 🆕 Community-contributed +├── kits/ # 🆕 Bundled kits +│ ├── dagster-development/KIT.yaml +│ ├── dbt-snowflake-pipeline/KIT.yaml +│ └── airbyte-ingestion/KIT.yaml +├── .claude-plugin/ +│ └── marketplace.json +├── benchmarks/ # 🆕 Benchmark results per skill +│ ├── ade-bench/ +│ └── spider2-dbt/ +├── CONTRIBUTING.md # Enhanced partner guide +├── PARTNER_GUIDE.md # 🆕 Detailed partner onboarding +└── README.md +``` + +### 9.3 Why This Works for Partners + +1. **Low barrier:** Partner writes 3-5 SKILL.md files in a PR — no SDK, no build system +2. **Credibility:** Published benchmarks prove skills improve AI performance +3. **Distribution:** Every Altimate Code user gets the skills; Claude Code users can install via marketplace +4. **Cross-promotion:** Partner's name appears in the repo, README, and marketplace listing +5. **Portable:** Skills work across 30+ AI agent products (not locked to Altimate Code) + +### 9.4 Partner Contribution Template + +```markdown + + +## Vendor: [Dagster] + +### Skills Added +- [ ] `creating-dagster-assets` — Asset creation workflow +- [ ] `debugging-dagster-runs` — Run failure diagnosis +- [ ] `testing-dagster-assets` — Asset testing patterns + +### Quality Checklist +- [ ] Each skill has actionable workflow steps (not reference docs) +- [ ] Each skill has a verification step +- [ ] Each skill has an anti-patterns section +- [ ] Skills reference MCP tools where applicable +- [ ] Skills tested with Claude/GPT-4 on real tasks +- [ ] Benchmark results included (if available) + +### MCP Server (optional) +- Package: `dg[mcp]` +- Install: `pip install "dg[mcp]"` +- Docs: https://dagster.io/docs/mcp + +### Kit (optional) +- [ ] KIT.yaml included in `kits/` +``` + +--- + +## 10. 
Onboarding Playbook + +### 10.1 Timeline + +``` +Week 0 (Kickoff) +├── Partner intro call +├── Share this document + CONTRIBUTING.md +└── Partner identifies 3-5 initial skills + +Week 1 (Skills) +├── Partner writes SKILL.md files +├── We review for quality (checklist above) +└── PR merged to data-engineering-skills + +Week 2 (MCP — if applicable) +├── Partner confirms their MCP server works with Altimate Code +├── We add recommended config to our docs +└── Test skill + MCP combination + +Week 3 (Kit + Launch) +├── Bundle into KIT.yaml +├── Co-authored blog post / announcement +├── Listed in our extension directory +└── Partner adds "Works with Altimate Code" badge to their docs +``` + +### 10.2 Support We Provide + +| Support | Description | +|---------|-------------| +| Skill review | Code review of SKILL.md files for quality | +| MCP testing | Verify their MCP server works in our runtime | +| Benchmark run | Run their skills through ADE-bench or Spider2 | +| Co-marketing | Blog post, social, newsletter mention | +| Badge/logo | "Works with Altimate Code" badge for their docs | +| Direct Slack channel | Shared Slack channel for partner support | + +### 10.3 What Partners Deliver + +| Deliverable | Required? | Format | +|-------------|-----------|--------| +| 3-5 SKILL.md files | Yes | Markdown (PR to data-engineering-skills) | +| MCP server config | If they have one | JSON snippet for our docs | +| KIT.yaml | Recommended | YAML file | +| Plugin package | Optional | npm package | +| Blog post draft | Recommended | Markdown (co-authored) | + +--- + +## 11. What We Need to Build + +### 11.1 Priority 1: Kit System (Weeks 1-3) + +The single biggest gap vs. Goose. Kits bundle skills + MCP + plugins + instructions into one installable unit.
+ +**Implementation:** +- KIT.yaml schema and parser +- `altimate-code kit install ` CLI command +- Kit auto-detection (suggest kit when project type detected) +- Kit storage in `~/.altimate/kits/` + +**Files to modify:** +- New: `packages/opencode/src/kit/` (schema, loader, installer) +- New: `packages/opencode/src/cli/cmd/kit.ts` (CLI command) +- Modify: `packages/opencode/src/config/` (kit config integration) + +### 11.2 Priority 2: Extension Directory (Weeks 2-4) + +A browseable catalog of skills, MCP servers, and kits. + +**Options:** +- **Minimal:** Curated page on docs site (like Goose's browse page) +- **Medium:** GitHub-based registry (index.json in a repo, auto-generated site) +- **Full:** API-backed marketplace with search, ratings, install counts + +**Recommendation:** Start with a GitHub-based registry. The `data-engineering-skills` repo already has `index.json` support via our `Discovery.pull()` mechanism. + +### 11.3 Priority 3: Auto-Detection & Suggestion (Weeks 3-5) + +When a user opens Altimate Code in a Dagster project, automatically suggest: +- "Detected Dagster project. Install Dagster skills + MCP server?" + +**Implementation:** +- Project type detection (look for `dagster.yaml`, `dbt_project.yml`, `airbyte/`, etc.) +- Suggestion UI in TUI +- One-command install of recommended kit + +### 11.4 Priority 4: Partner SDK Documentation (Week 1) + +Publish clear documentation for each layer: +- Skill Authoring Guide (from Section 5 above) +- MCP Integration Guide (from Section 6 above) +- Plugin Development Guide (from Section 7 above) +- Kit Bundling Guide (from Section 8 above) + +### 11.5 Priority 5: Skill Versioning (Weeks 4-6) + +Current gap: no way to pin skill versions or handle updates. + +**Proposed:** Use git tags/releases in skill repos. 
`skills.urls` entries become: +```json +{ + "skills": { + "urls": ["https://github.com/DagsterHQ/dagster-skills/releases/download/v1.2.0/"] + } +} +``` + +### 11.6 Engineering Work Summary + +| Item | Effort | Priority | Dependency | +|------|--------|----------|------------| +| KIT.yaml schema + parser | 3 days | P0 | None | +| `kit install` CLI command | 2 days | P0 | Schema | +| Kit auto-detection | 2 days | P1 | Kit system | +| Extension directory (GitHub-based) | 3 days | P1 | None | +| Partner SDK documentation site | 3 days | P1 | None | +| Skill versioning (git tags) | 2 days | P2 | None | +| Deep links (`altimate-code://`) | 2 days | P2 | Kit system | +| Extension malware scanning | 3 days | P3 | None | +| Install count telemetry | 1 day | P3 | None | + +--- + +## 12. Competitive Positioning + +### 12.1 Our Advantages vs. Goose + +| Dimension | Goose | Altimate Code | Winner | +|-----------|-------|---------------|--------| +| Data engineering focus | Generic | Purpose-built (99+ DE tools) | **Altimate** | +| Skills system | No skills | SKILL.md + benchmark-proven | **Altimate** | +| MCP support | Primary interface | Full support + auto-detect | Tie | +| Plugin hooks | None (MCP only) | 20+ hooks for deep integration | **Altimate** | +| Recipes / Kits | Yes (mature) | Kits (planned) | **Goose** | +| Extension directory | 70+ servers listed | Not yet (planned) | **Goose** | +| Deep links | Yes | Not yet (planned) | **Goose** | +| Warehouse integrations | None built-in | 10 warehouses native | **Altimate** | +| SQL/dbt tools | Via MCP only | 99+ native tools | **Altimate** | +| Custom distros | Documented | Possible but undocumented | **Goose** | + +### 12.2 Our Advantages vs. 
Generic AI Agents + +- **Vertical expertise:** 11 data engineering skills + 99 specialized tools +- **Benchmark-proven:** ADE-bench, Spider2-dbt results published +- **Warehouse-native:** Direct connections to 10 data warehouses +- **dbt-native:** Deep dbt integration (not just MCP proxy) +- **Python bridge:** Full Python analysis engine (altimate-engine) + +### 12.3 Positioning Statement + +> **Altimate Code is the AI data engineering agent that works with your entire data stack.** Install skills and MCP servers from your favorite tools — dbt, Dagster, Airbyte, Snowflake, and more — and get an AI assistant that truly understands your data platform. + +--- + +## 13. Appendix + +### 13.1 Research Sources + +**Goose (Block):** +- [GitHub](https://github.com/block/goose) | [Architecture](https://block.github.io/goose/docs/goose-architecture/) | [Extensions](https://block.github.io/goose/docs/getting-started/using-extensions/) | [Custom Extensions Tutorial](https://block.github.io/goose/docs/tutorials/custom-extensions/) | [Recipes](https://block.github.io/goose/docs/guides/recipes/) | [Custom Distros](https://github.com/block/goose/blob/main/CUSTOM_DISTROS.md) | [Browse Extensions](https://block.github.io/goose/extensions/) + +**Data Vendor MCP Servers:** +- [dbt MCP](https://docs.getdbt.com/docs/cloud/mcp-server) (58 tools) | [Dagster MCP](https://dagster.io/blog/dagsters-mcp-server) | [Airbyte MCP](https://airbyte.com/blog/how-we-built-an-mcp-server-to-create-data-pipelines) | [DataHub MCP](https://datahub.com/blog/datahub-mcp-server-block-ai-agents-use-case/) | [OpenMetadata Recipe](https://blog.open-metadata.org/announcing-our-first-openmetadata-goose-recipe-67d9249c2fd3) + +**Extension Ecosystems:** +- [agentskills.io](https://agentskills.io) (open standard, 30+ adopters) | [MCP Registry](https://registry.modelcontextprotocol.io) | [awesome-opencode](https://github.com/awesome-opencode/awesome-opencode) (50+ plugins) | [SkillsMP](https://skillsmp.com/) (2,300+ 
skills) | [awesome-agent-skills](https://github.com/heilcheng/awesome-agent-skills) (1,300+ skills) + +**Framework Patterns:** +- [Composio](https://github.com/ComposioHQ/composio) (hub-and-spoke providers) | [LangChain](https://github.com/langchain-ai/langchain) (separate packages) | [CrewAI](https://github.com/crewai/crewai) (decorator + class tools) | [Semantic Kernel](https://learn.microsoft.com/semantic-kernel/) (DI plugins, <20 tool recommendation) + +**Partner Ecosystem Benchmarks:** +- Marketplace review processes: 24 hours (Zoho) to 10 business days (HubSpot) +- Recertification: every 2 years +- VS Code pattern (5-day domain verification, automated checks) = lightest weight + +### 13.2 Glossary + +| Term | Definition | +|------|-----------| +| **SKILL.md** | Markdown file with YAML frontmatter teaching an AI how to approach a task | +| **MCP** | Model Context Protocol — standard for AI tools (Anthropic-led, adopted by industry) | +| **MCP Server** | A process that exposes tools/resources via the MCP protocol | +| **Plugin** | npm package that hooks into Altimate Code's runtime (auth, tools, chat) | +| **Kit** | YAML bundle of skills + MCP + plugins + instructions (KIT.yaml) | +| **Hook** | Interception point in plugin system (e.g., `tool.execute.before`) | +| **Agent Skills Standard** | Open standard at agentskills.io for portable AI skills | diff --git a/docs/docs/configure/index.md b/docs/docs/configure/index.md index d2df2d3ed8..aeefb15ca0 100644 --- a/docs/docs/configure/index.md +++ b/docs/docs/configure/index.md @@ -38,6 +38,14 @@ Set up your warehouses, LLM providers, and preferences. For agents, tools, skill [:octicons-arrow-right-24: MCP Servers](mcp-servers.md) · [:octicons-arrow-right-24: ACP Support](acp.md) +- :material-package-variant:{ .lg .middle } **Kits** + + --- + + Bundles of skills, MCP servers, and instructions. Activate a kit to get a complete development setup for dbt, Snowflake, Dagster, and more. 
+ + [:octicons-arrow-right-24: Kits](kits.md) + - :material-palette:{ .lg .middle } **Appearance** --- diff --git a/docs/docs/configure/kits.md b/docs/docs/configure/kits.md new file mode 100644 index 0000000000..32ffb2ab6d --- /dev/null +++ b/docs/docs/configure/kits.md @@ -0,0 +1,192 @@ +# Kits + +Kits bundle skills, MCP servers, and instructions into a single activatable unit. Instead of configuring each piece separately, activate a kit to get a complete development setup. + +## Quick Start + +```bash +# List available kits +altimate-code kit list + +# Auto-detect kits for your project +altimate-code kit detect + +# Activate a kit +altimate-code kit activate dbt-snowflake + +# Check active kits +altimate-code kit status + +# Deactivate +altimate-code kit deactivate dbt-snowflake +``` + +## Installing Kits + +Install kits from GitHub repositories or local paths: + +```bash +# From GitHub +altimate-code kit install AltimateAI/data-engineering-skills + +# From local path +altimate-code kit install ./my-kits + +# Install globally (available in all projects) +altimate-code kit install AltimateAI/data-engineering-skills --global +``` + +## KIT.yaml Format + +Kits are defined in `KIT.yaml` files: + +```yaml +name: my-kit +description: What this kit configures +version: 1.0.0 + +# Skills to install +skills: + - source: "owner/repo" + select: ["skill-a", "skill-b"] + +# MCP servers to configure +mcp: + server-name: + type: stdio + command: ["uvx", "my-mcp-server"] + env_keys: ["API_KEY"] + description: "Server description" + +# Instructions for every conversation +instructions: | + Project-specific conventions and rules. + +# Auto-detection rules +detect: + - files: ["config.yaml"] + message: "Detected my-tool — activate kit?" +``` + +## What `kit activate` Does + +When you activate a kit, it: + +1. **Installs skills** from referenced repositories into `.opencode/skills/` +2. **Configures MCP servers** by merging entries into your project's config file +3. 
**Creates instruction files** at `.opencode/instructions/kit-<name>.md` +4. **Registers the kit** as active in `.opencode/active-kits` + +All changes are reversible with `kit deactivate`. + +## Creating Your Own Kit + +```bash +altimate-code kit create my-team-standards +``` + +This scaffolds `.opencode/kits/my-team-standards/KIT.yaml` with a template. Edit it, then activate: + +```bash +altimate-code kit activate my-team-standards +``` + +### Validating + +Check your kit for issues before sharing: + +```bash +altimate-code kit validate my-team-standards +``` + +## Multiple Active Kits + +You can activate multiple kits simultaneously. Their MCP servers are merged and instruction files coexist: + +```bash +altimate-code kit activate dbt-snowflake +altimate-code kit activate my-team-standards +altimate-code kit status # shows both +``` + +## Trust Tiers + +| Tier | Description | +|------|-------------| +| `built-in` | Ships with Altimate Code, maintained by the team | +| `verified` | Published by official vendors, reviewed | +| `community` | Created by anyone, use at your discretion | + +## Kit Locations + +Kits are discovered from: + +1. **Project**: `.opencode/kits/` and `.altimate-code/kits/` +2. **Global**: `~/.config/altimate-code/kits/` +3. **Config paths**: `kits.paths` in your config file +4. 
**Installed**: `~/.local/share/altimate-code/kits/` + +## CLI Reference + +| Command | Description | +|---------|-------------| +| `kit list` | List all available kits | +| `kit list --json` | JSON output for scripting | +| `kit list --detect` | Show only project-matching kits | +| `kit create <name>` | Scaffold a new kit | +| `kit show <name>` | Display full kit details | +| `kit install <source>` | Install from GitHub or local path | +| `kit activate <name>` | Install skills, configure MCP, enable | +| `kit activate <name> --yes` | Skip confirmation prompt | +| `kit deactivate <name>` | Remove from active kits, clean up | +| `kit remove <name>` | Delete an installed kit | +| `kit detect` | Find kits matching current project | +| `kit search [query]` | Search the kit registry | +| `kit status` | Show active kits | +| `kit validate [name]` | Validate kit format and references | + +## Sharing Kits + +Share kits via Git repositories. The recommended structure: + +``` +my-kits/ + kits/ + kit-a/KIT.yaml + kit-b/KIT.yaml + README.md +``` + +Others install with: `altimate-code kit install owner/my-kits` + +## Available Kits + +See [data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) for the official kit registry. + +## Roadmap + +The kit system is actively evolving based on community feedback. 
Here's what's planned: + +### Coming Soon + +| Feature | Description | Status | +|---------|-------------|--------| +| **`kit switch`** | Switch between kits in one command (deactivate all, activate one) | Planned | +| **Kit inheritance** | `extends: base-kit` to share conventions across kits | Planned | +| **`kit update`** | Pull newer versions of installed kits from source | Planned | +| **Registry expansion** | More built-in kits for BigQuery, Databricks, Airflow, Dagster | In progress | +| **`kit enforce`** | CI command that fails if required kits are not active | Planned | + +### Future + +| Feature | Description | +|---------|-------------| +| **Auto-activation** | Automatically suggest or activate kits when detection rules match on project open | +| **Kit locking** | Prevent deactivation of compliance-critical kits without admin override | +| **Conflict detection** | Warn when two active kits have contradictory instructions | +| **Kit analytics** | Activation counts and skill usage metrics for kit authors | +| **MCP tool filtering** | Allow kits to expose only specific tools from an MCP server | + +### Contributing to the Roadmap + +Have a feature request? [Open an issue](https://github.com/AltimateAI/altimate-code/issues) with the `kit` label, or contribute directly to the [data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) repo. diff --git a/docs/docs/develop/ecosystem.md b/docs/docs/develop/ecosystem.md index 3f847d5d41..92b789dd8f 100644 --- a/docs/docs/develop/ecosystem.md +++ b/docs/docs/develop/ecosystem.md @@ -18,6 +18,24 @@ altimate has a growing ecosystem of plugins, tools, and integrations. - **MCP**: Model Context Protocol servers - **ACP**: Agent Communication Protocol for editors +## Kits + +Kits bundle skills, MCP servers, and instructions into shareable development setups. Anyone can create and distribute kits. 
+ +| Kit | Description | +|-----|-------------| +| [dbt-snowflake](https://github.com/AltimateAI/data-engineering-skills/tree/main/kits/dbt-snowflake) | Complete dbt + Snowflake setup | + +Browse the [kit registry](https://github.com/AltimateAI/data-engineering-skills/blob/main/registry.json) for more. + +### Creating Kits + +See the [Kit documentation](../configure/kits.md) for the full guide, or run: + +```bash +altimate-code kit create my-kit +``` + ## Community - [GitHub Repository](https://github.com/AltimateAI/altimate-code): Source code, issues, discussions diff --git a/docs/docs/develop/kits.md b/docs/docs/develop/kits.md new file mode 100644 index 0000000000..d450298bbe --- /dev/null +++ b/docs/docs/develop/kits.md @@ -0,0 +1,422 @@ +# Building Kits + +This guide is for anyone who wants to **create and distribute kits** — vendors, solution architects, team leads, or community contributors. For using kits, see [Configure > Kits](../configure/kits.md). + +## What's in a Kit? + +A kit is a `KIT.yaml` file that bundles: + +- **Skills** — teach the AI how to approach tasks (from any Git repo) +- **MCP servers** — give the AI tools to execute tasks (standard MCP protocol) +- **Instructions** — project-specific rules injected into every conversation +- **Detection rules** — auto-suggest the kit when matching files exist + +## Tutorial: Build Your First Kit in 5 Minutes + +### Step 1: Scaffold + +```bash +altimate-code kit create my-first-kit +``` + +This creates `.opencode/kits/my-first-kit/KIT.yaml`: + +```yaml +name: my-first-kit +description: TODO — describe what this kit configures +version: 1.0.0 + +skills: + # - source: "owner/repo" + # select: ["skill-a", "skill-b"] + +mcp: + # my-server: + # command: ["uvx", "my-mcp-server"] + # env_keys: ["MY_API_KEY"] + +detect: + # - files: ["config.yaml"] + # message: "Detected my-tool — activate kit?" + +instructions: | + TODO — add project-specific instructions here. +``` + +### Step 2: Edit + +Fill in real content. 
Here's a complete example for an internal team: + +```yaml +name: acme-data-team +description: ACME Corp data engineering standards and conventions +version: 1.0.0 + +skills: + - source: "AltimateAI/data-engineering-skills" + select: + - creating-dbt-models + - testing-dbt-models + - debugging-dbt-errors + +mcp: + dbt: + type: stdio + command: ["uvx", "dbt-mcp"] + env: + DBT_PROJECT_DIR: "./" + env_keys: ["DBT_PROJECT_DIR"] + description: "dbt MCP server for model development" + +detect: + - files: ["dbt_project.yml"] + message: "Detected dbt project — activate ACME data team kit?" + +instructions: | + ## ACME Data Team Conventions + + - Table naming: dim_*, fct_*, stg_*, int_* + - All models must have unique + not_null tests on primary keys + - Use ref() for all model references + - Warehouse sizing: XS for dev, M for staging, L for prod + - Code review required for any model touching PII columns +``` + +### Step 3: Validate + +```bash +altimate-code kit validate my-first-kit +``` + +Output: +``` +Validating: my-first-kit + + ✓ Name "my-first-kit" is valid + ✓ Description present + ✓ Version "1.0.0" is valid semver + ✓ 1 skill source(s) defined + ✓ MCP "dbt": command defined + ⚠ MCP "dbt": env var DBT_PROJECT_DIR is NOT set + ✓ 1 detection rule(s) defined + ✓ Instructions present (10 lines) + +Validation: PASS +``` + +### Step 4: Activate + +```bash +altimate-code kit activate my-first-kit +``` + +### Step 5: Share + +Commit the kit to your repo. Others install with: + +```bash +altimate-code kit install owner/repo +``` + +## KIT.yaml Schema Reference + +### Required Fields + +| Field | Type | Description | +|-------|------|-------------| +| `name` | string | Lowercase, hyphens, 2-64 chars. 
Must match `^[a-z][a-z0-9]*(-[a-z0-9]+)*$` | +| `description` | string | One-line summary of what the kit configures | + +### Optional Fields + +| Field | Type | Default | Description | +|-------|------|---------|-------------| +| `version` | string | `"1.0.0"` | Semver version | +| `author` | string | — | Author name or organization | +| `tier` | string | `"community"` | Trust tier: `built-in`, `verified`, `community`, `archived` | +| `skills` | array | `[]` | Skills to install (see below) | +| `skill_packs` | object | `{}` | Grouped skills with activation modes (see below) | +| `mcp` | object | `{}` | MCP servers to configure (see below) | +| `plugins` | array | `[]` | npm packages to install | +| `instructions` | string | — | Text injected into every AI conversation | +| `detect` | array | `[]` | File patterns that trigger kit suggestion | + +### Skills + +Skills reference external repositories containing `SKILL.md` files: + +```yaml +skills: + # Install specific skills from a repo + - source: "AltimateAI/data-engineering-skills" + select: + - creating-dbt-models + - testing-dbt-models + + # Install all skills from a repo (omit select) + - source: "owner/skills-repo" + + # Reference an already-installed skill by name + - "my-existing-skill" +``` + +The `source` field accepts: +- GitHub shorthand: `owner/repo` +- Full URL: `https://github.com/owner/repo` +- Local path: `./my-skills` + +### Skill Packs + +For kits with many skills, organize them into packs with activation modes: + +```yaml +skill_packs: + core: + description: "Essential skills loaded every session" + activation: always + skills: + - source: "owner/repo" + select: ["skill-a", "skill-b"] + + advanced: + description: "Skills loaded when matching files exist" + activation: detect + detect: + - files: ["**/advanced/**"] + skills: + - source: "owner/repo" + select: ["skill-c"] + + specialized: + description: "Skills loaded only on explicit request" + activation: manual + skills: + - source: "owner/repo" 
+ select: ["skill-d"] +``` + +| Activation | Behavior | +|-----------|----------| +| `always` | Skills loaded every session when kit is active | +| `detect` | Skills loaded when matching files exist in the project | +| `manual` | Skills loaded only when the user explicitly requests them | + +!!! note + When `skill_packs` is present, it takes precedence over the flat `skills` array. Use one or the other, not both. + +### MCP Servers + +Configure MCP (Model Context Protocol) servers that give the AI tools to call: + +```yaml +mcp: + my-server: + type: stdio # "stdio" for local, "sse" or "remote" for HTTP + command: ["uvx", "my-server"] # Command to start the server + args: ["--port", "8080"] # Additional arguments (merged with command) + env: # Environment variables passed to the server + API_KEY: "default-value" + env_keys: ["API_KEY"] # Env vars the user must set (warns if missing) + description: "What this server provides" +``` + +**Type mapping:** The kit uses user-friendly names that are translated to the config format: + +| Kit type | Config type | Use case | +|----------|-----------|----------| +| `stdio` (default) | `local` | Local process via stdin/stdout | +| `sse` | `remote` | Server-sent events over HTTP | +| `streamable-http` | `remote` | Streamable HTTP | + +**Environment variables:** + +- `env`: Default values passed to the MCP server process +- `env_keys`: Names of variables the user must set. Kit activation warns if these are missing. Use this for API keys and secrets that shouldn't have defaults. + +### Detection Rules + +Auto-suggest the kit when certain files exist in the project: + +```yaml +detect: + - files: ["dbt_project.yml", "dbt_project.yaml"] + message: "Detected dbt project — activate this kit?" 
+ + - files: ["**/dagster/**", "workspace.yaml"] + message: "Detected Dagster project" +``` + +- `files`: Array of glob patterns matched against the project directory +- `message`: Optional suggestion text shown to the user + +Users discover matching kits via `kit detect` or `kit list --detect`. The TUI also shows a nudge on startup when matching kits are found. + +### Instructions + +Free-form text injected into the AI's system context for every conversation when the kit is active: + +```yaml +instructions: | + ## Team Conventions + + - Use snake_case for all column names + - All monetary values in cents (integer), not dollars + - Every model must have a primary key test + - Do NOT use SELECT * in production models +``` + +**Best practices for instructions:** + +- Keep them under 50 lines — longer instructions consume more context tokens +- Be specific and actionable — "use snake_case" is better than "follow naming conventions" +- Use markdown headers to organize sections +- Include "DO NOT" rules for common mistakes +- Avoid duplicating what skills already teach + +## Publishing to the Registry + +The kit registry is hosted at [AltimateAI/data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills). + +### For Community Contributors + +1. Create your kit in your own GitHub repo +2. Test with `kit validate` and `kit activate` +3. Submit a PR to [data-engineering-skills](https://github.com/AltimateAI/data-engineering-skills) adding an entry to `registry.json`: + +```json +{ + "name": "my-kit", + "description": "What it does", + "version": "1.0.0", + "author": "Your Name", + "tier": "community", + "repo": "your-org/your-repo", + "path": "kits/my-kit", + "tags": ["dbt", "bigquery"], + "detect": ["dbt_project.yml"] +} +``` + +### For Vendors (Verified Tier) + +To get your kit listed as `verified`: + +1. Create skills and a kit in your organization's GitHub repo +2. Test thoroughly with `kit validate` and real-world projects +3. 
Submit a PR to the registry with `"tier": "verified"` +4. The Altimate team reviews the kit for quality and correctness +5. Once approved, your kit appears with a `[verified]` badge + +**Verified tier requirements:** + +- Skills follow the [Agent Skills](https://agentskills.io) specification +- MCP server is published to PyPI or npm +- Detection rules are accurate (no false positives) +- Instructions are clear and well-structured +- Kit is actively maintained + +## Examples + +### Instructions-Only Kit (Team Standards) + +No skills, no MCP — just team conventions: + +```yaml +name: team-standards +description: Engineering standards for the analytics team +version: 1.0.0 + +instructions: | + - All SQL in lowercase + - CTEs over subqueries + - No SELECT * in production + - Every PR needs a dbt test + +detect: + - files: ["dbt_project.yml"] +``` + +### MCP-Only Kit (Tool Integration) + +No skills, no instructions — just MCP configuration: + +```yaml +name: airbyte-connector +description: Airbyte PyAirbyte MCP server for data pipeline development +version: 1.0.0 + +mcp: + airbyte: + type: stdio + command: ["uvx", "pyairbyte-mcp"] + env_keys: ["AIRBYTE_API_KEY"] + description: "PyAirbyte — generate pipelines with 600+ connectors" + +detect: + - files: ["**/airbyte_*.py", "airbyte.yaml"] +``` + +### Full Kit (Skills + MCP + Instructions) + +The complete package: + +```yaml +name: dbt-snowflake +description: Complete dbt + Snowflake development setup +version: 1.0.0 +author: Altimate AI +tier: built-in + +skills: + - source: "AltimateAI/data-engineering-skills" + select: + - creating-dbt-models + - testing-dbt-models + - debugging-dbt-errors + +mcp: + dbt: + type: stdio + command: ["uvx", "dbt-mcp"] + env: + DBT_PROJECT_DIR: "./" + DBT_PROFILES_DIR: "~/.dbt" + env_keys: ["DBT_PROJECT_DIR", "DBT_PROFILES_DIR"] + description: "dbt MCP server — SQL execution, semantic layer, discovery API" + +instructions: | + This project uses dbt with Snowflake. 
+ - Use ref() for all model references + - Follow staging → intermediate → marts layering + - Run dbt build (not just compile) to verify changes + +detect: + - files: ["dbt_project.yml"] + message: "Detected dbt project — activate dbt-snowflake kit?" +``` + +## Troubleshooting + +### Kit not showing in `kit list` + +- Check the `KIT.yaml` file is valid: `kit validate ` +- Ensure the file is named exactly `KIT.yaml` (case-sensitive) +- Check the kit directory is under `.opencode/kits/` or another scanned location + +### Skills fail to install during `kit activate` + +- The `source` repo must be accessible (public GitHub or reachable URL) +- Skills that already exist locally are skipped with a warning +- If a source fails, other components (MCP, instructions) still install + +### MCP server doesn't start after activation + +- Check `kit validate` for missing environment variables +- Set required env vars in your shell profile or `.env` file +- Verify the MCP command is installed: run the command manually (e.g., `uvx dbt-mcp --help`) + +### `kit deactivate` didn't clean up + +- `kit deactivate` removes: instruction files, active-kits entry, and MCP config entries +- Skills installed by the kit are NOT removed (they may be shared with other kits) +- To fully clean up skills, remove them from `.opencode/skills/` manually diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index aadb7db2f4..cae88d1793 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -121,6 +121,7 @@ nav: - MCPs & ACPs: - MCP Servers: configure/mcp-servers.md - ACP Support: configure/acp.md + - Kits: configure/kits.md - Appearance: - Themes: configure/themes.md - Keybinds: configure/keybinds.md @@ -147,4 +148,5 @@ nav: - SDK: develop/sdk.md - Server API: develop/server.md - Plugins: develop/plugins.md + - Building Kits: develop/kits.md - Ecosystem: develop/ecosystem.md diff --git a/packages/opencode/src/altimate/telemetry/index.ts b/packages/opencode/src/altimate/telemetry/index.ts index 
9e1564eae6..c1d560c44b 100644 --- a/packages/opencode/src/altimate/telemetry/index.ts +++ b/packages/opencode/src/altimate/telemetry/index.ts @@ -387,6 +387,42 @@ export namespace Telemetry { source: "cli" | "tui" } // altimate_change end + // altimate_change start — kit: telemetry events for kit management + | { + type: "kit_created" + timestamp: number + session_id: string + kit_name: string + source: "cli" | "tui" + } + | { + type: "kit_installed" + timestamp: number + session_id: string + install_source: string + kit_count: number + kit_names: string[] + source: "cli" | "tui" + } + | { + type: "kit_applied" + timestamp: number + session_id: string + kit_name: string + skill_count: number + mcp_count: number + plugin_count: number + has_instructions: boolean + source: "cli" | "tui" + } + | { + type: "kit_removed" + timestamp: number + session_id: string + kit_name: string + source: "cli" | "tui" + } + // altimate_change end | { type: "sql_execute_failure" timestamp: number diff --git a/packages/opencode/src/cli/cmd/kit.ts b/packages/opencode/src/cli/cmd/kit.ts new file mode 100644 index 0000000000..abe560b049 --- /dev/null +++ b/packages/opencode/src/cli/cmd/kit.ts @@ -0,0 +1,1366 @@ +// altimate_change start — kit: top-level `kit` command for managing kit bundles +import { EOL } from "os" +import path from "path" +import fs from "fs/promises" +import { Kit } from "../../kit" +import { Skill } from "../../skill" +import { bootstrap } from "../bootstrap" +import { cmd } from "./cmd" +import { Instance } from "../../project/instance" +import { Global } from "@/global" +import { Telemetry } from "@/altimate/telemetry" +// altimate_change start — kit: jsonc-parser for comment-preserving config writes +import { modify, applyEdits } from "jsonc-parser" +// altimate_change end + +// --------------------------------------------------------------------------- +// KIT.yaml template +// --------------------------------------------------------------------------- + +function 
kitTemplate(name: string): string { + return `name: ${name} +description: TODO — describe what this kit configures +version: 1.0.0 + +# Skills to install (from external repos or already-installed names) +skills: + # - source: "owner/repo" + # select: ["skill-a", "skill-b"] + +# MCP servers to configure +mcp: + # my-server: + # command: ["uvx", "my-mcp-server"] + # env_keys: ["MY_API_KEY"] + +# Auto-detection rules +detect: + # - files: ["config.yaml"] + # message: "Detected my-tool — activate kit?" + +# Instructions added to every conversation +instructions: | + TODO — add project-specific instructions here. +` +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +async function findConfigFile(rootDir: string): Promise<{ filePath: string; config: Record }> { + const candidates = [ + path.join(rootDir, ".opencode", "opencode.json"), + path.join(rootDir, ".opencode", "opencode.jsonc"), + path.join(rootDir, ".altimate-code", "altimate-code.json"), + path.join(rootDir, ".altimate-code", "altimate-code.jsonc"), + path.join(rootDir, "opencode.json"), + path.join(rootDir, "opencode.jsonc"), + path.join(rootDir, "altimate-code.json"), + path.join(rootDir, "altimate-code.jsonc"), + ] + + for (const candidate of candidates) { + try { + const raw = await fs.readFile(candidate, "utf-8") + // Strip single-line comments for JSONC files + const cleaned = candidate.endsWith(".jsonc") + ? 
raw.replace(/^\s*\/\/.*$/gm, "").replace(/,(\s*[}\]])/g, "$1") + : raw + return { filePath: candidate, config: JSON.parse(cleaned) } + } catch { + // try next + } + } + + // No config found — create one in .opencode/ + const defaultPath = path.join(rootDir, ".opencode", "opencode.json") + await fs.mkdir(path.dirname(defaultPath), { recursive: true }) + const defaultConfig: Record = {} + await fs.writeFile(defaultPath, JSON.stringify(defaultConfig, null, 2) + EOL, "utf-8") + return { filePath: defaultPath, config: defaultConfig } +} + +// altimate_change start — kit: JSONC-aware config writes that preserve comments +async function writeConfigField(filePath: string, fieldPath: string[], value: unknown): Promise { + let text = "{}" + try { text = await fs.readFile(filePath, "utf-8") } catch {} + const edits = modify(text, fieldPath, value, { + formattingOptions: { tabSize: 2, insertSpaces: true }, + }) + const result = applyEdits(text, edits) + await fs.writeFile(filePath, result, "utf-8") +} + +async function removeConfigField(filePath: string, fieldPath: string[]): Promise { + let text: string + try { text = await fs.readFile(filePath, "utf-8") } catch { return false } + const edits = modify(text, fieldPath, undefined, { + formattingOptions: { tabSize: 2, insertSpaces: true }, + }) + if (edits.length === 0) return false + const result = applyEdits(text, edits) + await fs.writeFile(filePath, result, "utf-8") + return true +} +// altimate_change end + +async function cloneSource(source: string): Promise<{ dir: string; cloned: boolean }> { + let url: string | undefined + let normalized = source.trim().replace(/\.git$/, "") + + // Normalize GitHub web URLs (e.g. 
/tree/main/path) + const ghWebMatch = normalized.match(/^https?:\/\/github\.com\/([^/]+\/[^/]+?)(?:\/(?:tree|blob)\/.*)?$/) + if (ghWebMatch) { + url = `https://github.com/${ghWebMatch[1]}.git` + } else if (normalized.startsWith("http://") || normalized.startsWith("https://")) { + url = normalized + } else if (normalized.match(/^[a-zA-Z0-9_-]+\/[a-zA-Z0-9._-]+$/)) { + // Check if it's a local path first (e.g., "examples/kits" looks like "owner/repo") + const resolvedLocal = path.isAbsolute(normalized) ? normalized : path.resolve(normalized) + try { + await fs.access(resolvedLocal) + // It exists on disk — treat as local path, not GitHub shorthand + return { dir: resolvedLocal, cloned: false } + } catch { + // Not a local path — treat as GitHub shorthand + url = `https://github.com/${normalized}.git` + } + } + + if (url) { + const tmpDir = path.join(Global.Path.cache, "kit-install-" + Date.now()) + const proc = Bun.spawnSync(["git", "clone", "--depth", "1", "--", url, tmpDir], { + stdout: "pipe", + stderr: "pipe", + }) + if (proc.exitCode !== 0) { + throw new Error(`Failed to clone ${url}: ${proc.stderr.toString()}`) + } + return { dir: tmpDir, cloned: true } + } + + // Local path + const resolved = path.isAbsolute(normalized) ? 
normalized : path.resolve(normalized) + try { + await fs.access(resolved) + } catch { + throw new Error(`Path not found: ${resolved}`) + } + return { dir: resolved, cloned: false } +} + +async function cleanupTmp(dir: string, cloned: boolean) { + if (cloned && dir.startsWith(Global.Path.cache)) { + await fs.rm(dir, { recursive: true, force: true }) + } +} + +// --------------------------------------------------------------------------- +// Subcommands +// --------------------------------------------------------------------------- + +const KitListCommand = cmd({ + command: "list", + describe: "list all available kits", + builder: (yargs) => + yargs + .option("json", { + type: "boolean", + describe: "output as JSON", + default: false, + }) + .option("detect", { + type: "boolean", + describe: "show only kits matching the current project", + default: false, + }), + async handler(args) { + await bootstrap(process.cwd(), async () => { + let kits = await Kit.all() + + if (args.detect) { + const detected = await Kit.detect() + const detectedNames = new Set(detected.map((d) => d.kit.name)) + kits = kits.filter((r) => detectedNames.has(r.name)) + } + + // Sort alphabetically + kits.sort((a, b) => a.name.localeCompare(b.name)) + + if (args.json) { + // altimate_change start — kit: add tier + skill_packs to JSON output + const enriched = kits.map((kit) => { + const hasPacks = kit.skill_packs && Object.keys(kit.skill_packs).length > 0 + return { + name: kit.name, + tier: kit.tier || "community", + version: kit.version, + author: kit.author, + description: kit.description, + components: { + skills: hasPacks + ? Object.values(kit.skill_packs!).reduce((sum, pack) => sum + (pack.skills?.length || 0), 0) + : (Array.isArray(kit.skills) ? kit.skills.length : 0), + skill_packs: hasPacks ? Object.keys(kit.skill_packs!).length : 0, + mcp: kit.mcp ? Object.keys(kit.mcp).length : 0, + plugins: Array.isArray(kit.plugins) ? 
kit.plugins.length : 0, + }, + location: kit.location, + } + }) + // altimate_change end + process.stdout.write(JSON.stringify(enriched, null, 2) + EOL) + return + } + + // Human-readable table output + if (kits.length === 0) { + if (args.detect) { + process.stdout.write("No kits matched detection rules for this project." + EOL) + process.stdout.write(EOL + `See all kits: altimate-code kit list` + EOL) + } else { + process.stdout.write("No kits found." + EOL) + process.stdout.write(EOL + `Create one with: altimate-code kit create ` + EOL) + } + return + } + + // altimate_change start — kit: add tier column to table output + // Calculate column widths + const nameWidth = Math.max(6, ...kits.map((r) => r.name.length)) + const tierWidth = 12 + const versionWidth = Math.max(7, ...kits.map((r) => (r.version || "").length)) + + const header = `${"KIT".padEnd(nameWidth)} ${"TIER".padEnd(tierWidth)} ${"VERSION".padEnd(versionWidth)} ${"COMPONENTS".padEnd(20)} DESCRIPTION` + const separator = "─".repeat(header.length) + + process.stdout.write(EOL) + process.stdout.write(header + EOL) + process.stdout.write(separator + EOL) + + for (const kit of kits) { + // Count skills from skill_packs if present, otherwise flat skills array + const hasPacks = kit.skill_packs && Object.keys(kit.skill_packs).length > 0 + const skillCount = hasPacks + ? Object.values(kit.skill_packs!).reduce((sum, pack) => sum + (pack.skills?.length || 0), 0) + : (Array.isArray(kit.skills) ? kit.skills.length : 0) + const mcpCount = kit.mcp ? Object.keys(kit.mcp).length : 0 + const pluginCount = Array.isArray(kit.plugins) ? kit.plugins.length : 0 + const packCount = hasPacks ? Object.keys(kit.skill_packs!).length : 0 + const components = hasPacks + ? `${skillCount}sk ${packCount}pk ${mcpCount}mcp` + : `${skillCount}sk ${mcpCount}mcp ${pluginCount}pl` + + const tier = kit.tier || "community" + const tierBadge = tier !== "community" ? 
`[${tier}]` : "" + + let desc = kit.description || "" + if (desc.length > 50) { + desc = desc.slice(0, 50) + const lastSpace = desc.lastIndexOf(" ") + if (lastSpace > 30) desc = desc.slice(0, lastSpace) + desc += "..." + } + + process.stdout.write( + `${kit.name.padEnd(nameWidth)} ${tierBadge.padEnd(tierWidth)} ${(kit.version || "—").padEnd(versionWidth)} ${components.padEnd(20)} ${desc}` + EOL, + ) + } + // altimate_change end + + process.stdout.write(EOL) + process.stdout.write(`${kits.length} kit(s) found.` + EOL) + process.stdout.write(`Create a new kit: altimate-code kit create ` + EOL) + }) + }, +}) + +const KitCreateCommand = cmd({ + command: "create ", + describe: "scaffold a new kit", + builder: (yargs) => + yargs.positional("name", { + type: "string", + describe: "name of the kit to create", + demandOption: true, + }), + async handler(args) { + const name = args.name as string + + // Validate name before bootstrap (fast fail) + if (!/^[a-z][a-z0-9]*(-[a-z0-9]+)*$/.test(name) || name.length < 2) { + process.stderr.write( + `Error: Kit name must be lowercase alphanumeric with hyphens, at least 2 chars (e.g., "dbt-snowflake")` + EOL, + ) + process.exit(1) + } + if (name.length > 64) { + process.stderr.write(`Error: Kit name must be 64 characters or fewer` + EOL) + process.exit(1) + } + + await bootstrap(process.cwd(), async () => { + const rootDir = Instance.worktree !== "/" ? 
Instance.worktree : Instance.directory + + const kitDir = path.join(rootDir, ".opencode", "kits", name) + const kitFile = path.join(kitDir, "KIT.yaml") + + try { + await fs.access(kitFile) + process.stderr.write(`Error: Kit already exists at ${kitFile}` + EOL) + process.exit(1) + } catch { + // File doesn't exist, good + } + + await fs.mkdir(kitDir, { recursive: true }) + await fs.writeFile(kitFile, kitTemplate(name), "utf-8") + process.stdout.write(`✓ Created kit: ${path.relative(rootDir, kitFile)}` + EOL) + + // altimate_change start — telemetry + try { + Telemetry.track({ + type: "kit_created", + timestamp: Date.now(), + session_id: Telemetry.getContext().sessionId || "", + kit_name: name, + source: "cli", + }) + } catch {} + // altimate_change end + + process.stdout.write(EOL) + process.stdout.write(`Next steps:` + EOL) + process.stdout.write(` 1. Edit .opencode/kits/${name}/KIT.yaml — configure skills, MCP servers, and instructions` + EOL) + process.stdout.write(` 2. Activate it: altimate-code kit activate ${name}` + EOL) + }) + }, +}) + +const KitShowCommand = cmd({ + command: "show ", + describe: "display kit details", + builder: (yargs) => + yargs.positional("name", { + type: "string", + describe: "name of the kit to show", + demandOption: true, + }), + async handler(args) { + const name = args.name as string + await bootstrap(process.cwd(), async () => { + const kit = await Kit.get(name) + if (!kit) { + process.stderr.write(`Error: Kit "${name}" not found.` + EOL) + process.exit(1) + } + + const hasPacks = kit.skill_packs && Object.keys(kit.skill_packs).length > 0 + const skillCount = hasPacks + ? Object.values(kit.skill_packs!).reduce((sum, pack) => sum + (pack.skills?.length || 0), 0) + : (Array.isArray(kit.skills) ? kit.skills.length : 0) + const mcpCount = kit.mcp ? Object.keys(kit.mcp).length : 0 + const pluginCount = Array.isArray(kit.plugins) ? 
kit.plugins.length : 0 + + process.stdout.write(EOL) + process.stdout.write(` Name: ${kit.name}` + EOL) + process.stdout.write(` Description: ${kit.description || "—"}` + EOL) + process.stdout.write(` Version: ${kit.version || "—"}` + EOL) + process.stdout.write(` Author: ${kit.author || "—"}` + EOL) + process.stdout.write(` Tier: ${kit.tier || "community"}` + EOL) + process.stdout.write(` Location: ${kit.location}` + EOL) + process.stdout.write(EOL) + + // Skill packs (if present, takes precedence over flat skills) + if (hasPacks) { + const packs = Object.entries(kit.skill_packs!) + process.stdout.write(` Skill Packs (${packs.length}):` + EOL) + for (const [packName, pack] of packs) { + const badge = pack.activation === "always" ? "●" : pack.activation === "detect" ? "◐" : "○" + process.stdout.write(` ${badge} ${packName} (${pack.activation}, ${pack.skills.length} skills)` + EOL) + if (pack.description) { + process.stdout.write(` ${pack.description}` + EOL) + } + for (const skill of pack.skills) { + if (typeof skill === "string") { + process.stdout.write(` - ${skill}` + EOL) + } else { + const selected = skill.select ? ` [${skill.select.join(", ")}]` : "" + process.stdout.write(` - ${skill.source}${selected}` + EOL) + } + } + } + } else { + // Flat skills + process.stdout.write(` Skills (${skillCount}):` + EOL) + if (skillCount > 0) { + for (const skill of kit.skills!) { + if (typeof skill === "string") { + process.stdout.write(` - ${skill}` + EOL) + } else { + const selected = skill.select ? ` [${skill.select.join(", ")}]` : "" + process.stdout.write(` - ${skill.source}${selected}` + EOL) + } + } + } else { + process.stdout.write(` (none)` + EOL) + } + } + + // MCP servers + process.stdout.write(` MCP Servers (${mcpCount}):` + EOL) + if (mcpCount > 0) { + for (const [serverName, serverConfig] of Object.entries(kit.mcp!)) { + const desc = (serverConfig as Record).description || "" + process.stdout.write(` - ${serverName}${desc ? 
`: ${desc}` : ""}` + EOL) + } + } else { + process.stdout.write(` (none)` + EOL) + } + + // Plugins + process.stdout.write(` Plugins (${pluginCount}):` + EOL) + if (pluginCount > 0) { + for (const plugin of kit.plugins!) { + process.stdout.write(` - ${plugin}` + EOL) + } + } else { + process.stdout.write(` (none)` + EOL) + } + + // Detection rules + const detectCount = Array.isArray(kit.detect) ? kit.detect.length : 0 + if (detectCount > 0) { + process.stdout.write(EOL) + process.stdout.write(` Detection Rules (${detectCount}):` + EOL) + for (const rule of kit.detect!) { + const files = Array.isArray(rule.files) ? rule.files.join(", ") : "—" + process.stdout.write(` - files: [${files}]` + EOL) + if (rule.message) { + process.stdout.write(` message: ${rule.message}` + EOL) + } + } + } + + // Instructions + if (kit.instructions) { + process.stdout.write(EOL + "─".repeat(60) + EOL + EOL) + process.stdout.write(`Instructions:` + EOL + EOL) + process.stdout.write(kit.instructions + EOL) + } + }) + }, +}) + +const KitInstallCommand = cmd({ + command: "install ", + describe: "install a kit from GitHub or a local path", + builder: (yargs) => + yargs + .positional("source", { + type: "string", + describe: "GitHub repo (owner/repo), URL, or local path", + demandOption: true, + }) + .option("global", { + alias: "g", + type: "boolean", + describe: "install globally instead of per-project", + default: false, + }), + async handler(args) { + const source = (args.source as string).trim().replace(/\.git$/, "") + const isGlobal = args.global as boolean + + if (!source) { + process.stderr.write(`Error: Source is required. Use owner/repo, URL, or local path.` + EOL) + process.exit(1) + } + + await bootstrap(process.cwd(), async () => { + const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory + const targetDir = isGlobal + ? 
path.join(Global.Path.config, "kits") + : path.join(rootDir, ".opencode", "kits") + + let fetchDir: string + let cloned = false + + try { + const result = await cloneSource(source) + fetchDir = result.dir + cloned = result.cloned + if (cloned) { + process.stdout.write(`Fetching from ${source}...` + EOL) + } + } catch (err) { + process.stderr.write(`Error: ${(err as Error).message}` + EOL) + process.exit(1) + return // unreachable but satisfies TS + } + + // Find all KIT.yaml / KIT.yml / KIT.md files + const { Glob: BunGlob } = globalThis.Bun + const patterns = ["**/KIT.yaml", "**/KIT.yml", "**/KIT.md"] + const matches: string[] = [] + for (const pattern of patterns) { + const glob = new BunGlob(pattern) + for await (const match of glob.scan({ cwd: fetchDir, absolute: true })) { + if (!match.includes("/.git/")) matches.push(match) + } + } + + if (matches.length === 0) { + process.stderr.write(`Error: No KIT.yaml/KIT.yml/KIT.md files found in ${source}` + EOL) + await cleanupTmp(fetchDir, cloned) + process.exit(1) + } + + let installed = 0 + const installedNames: string[] = [] + + for (const kitFile of matches) { + const kitParent = path.dirname(kitFile) + + // Parse the YAML to get the kit name (don't rely on directory name) + let kitName: string + try { + const matter = (await import("gray-matter")).default + const raw = await fs.readFile(kitFile, "utf-8") + const ext = path.extname(kitFile).toLowerCase() + const parsed = ext === ".md" ? 
matter(raw) : matter("---\n" + raw + "\n---") + kitName = (parsed.data.name as string) || path.basename(kitParent) + } catch { + kitName = path.basename(kitParent) + } + + // Avoid using temp dir names as kit names + if (kitName.startsWith("kit-install-")) { + process.stdout.write(` ⚠ Skipping "${kitFile}" — could not determine kit name` + EOL) + continue + } + + const dest = path.join(targetDir, kitName) + + // Check if already installed + try { + await fs.access(dest) + process.stdout.write(` ⚠ Skipping "${kitName}" — already exists` + EOL) + continue + } catch { + // Not installed, proceed + } + + // Copy only the kit directory (not repo root — skip .git, node_modules, etc.) + await fs.mkdir(dest, { recursive: true }) + const files = await fs.readdir(kitParent) + for (const file of files) { + // Skip common non-kit files when copying from repo root + if ([".git", "node_modules", ".github", "LICENSE", "README.md"].includes(file)) continue + const src = path.join(kitParent, file) + const dst = path.join(dest, file) + const stat = await fs.lstat(src) + if (stat.isSymbolicLink()) continue + if (stat.isFile()) { + await fs.copyFile(src, dst) + } else if (stat.isDirectory()) { + await fs.cp(src, dst, { recursive: true, dereference: false }) + } + } + process.stdout.write(` ✓ Installed "${kitName}" → ${path.relative(rootDir, dest)}` + EOL) + installedNames.push(kitName) + installed++ + } + + await cleanupTmp(fetchDir, cloned) + + process.stdout.write(EOL) + if (installed > 0) { + process.stdout.write(`${installed} kit(s) installed${isGlobal ? 
" globally" : ""}.` + EOL) + // altimate_change start — telemetry + try { + Telemetry.track({ + type: "kit_installed", + timestamp: Date.now(), + session_id: Telemetry.getContext().sessionId || "", + install_source: source, + kit_count: installed, + kit_names: installedNames, + source: "cli", + }) + } catch {} + // altimate_change end + } else { + process.stdout.write(`No new kits installed.` + EOL) + } + }) + }, +}) + +// altimate_change start — kit: KitApplyCommand removed, functionality merged into KitActivateCommand +// altimate_change end + +const KitRemoveCommand = cmd({ + command: "remove ", + describe: "remove an installed kit", + builder: (yargs) => + yargs.positional("name", { + type: "string", + describe: "name of the kit to remove", + demandOption: true, + }), + async handler(args) { + const name = args.name as string + await bootstrap(process.cwd(), async () => { + const kit = await Kit.get(name) + if (!kit) { + process.stderr.write(`Error: Kit "${name}" not found.` + EOL) + process.exit(1) + } + + // Check if kit is tracked by git (part of the repo, not user-installed) + const kitDir = path.dirname(kit.location) + const gitCheck = Bun.spawnSync(["git", "ls-files", "--error-unmatch", kit.location], { + cwd: path.dirname(kitDir), + stdout: "pipe", + stderr: "pipe", + }) + if (gitCheck.exitCode === 0) { + process.stderr.write(`Error: Cannot remove "${name}" — it is tracked by git.` + EOL) + process.stderr.write(`This kit is part of the repository, not user-installed.` + EOL) + process.exit(1) + } + + // Safety: only remove if the directory looks like a kit directory + // (contains the KIT file and is not a top-level scan directory) + const kitBasename = path.basename(kitDir) + if (kitBasename === "kits" || kitBasename === "kit" || kitDir === Instance.directory) { + // The KIT.yaml is at a scan root — only remove the file, not the directory + await fs.rm(kit.location, { force: true }) + process.stdout.write(` ✓ Removed kit file: ${kit.location}` + EOL) + 
} else { + await fs.rm(kitDir, { recursive: true, force: true }) + process.stdout.write(` ✓ Removed kit: ${kitDir}` + EOL) + } + + // Deactivate if active, then invalidate cache + await Kit.deactivate(name) + Kit.invalidate() + + // altimate_change start — kit: clean up instruction file on remove + const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory + const instructionsFile = path.join(rootDir, ".opencode", "instructions", `kit-${name}.md`) + try { + await fs.access(instructionsFile) + await fs.rm(instructionsFile, { force: true }) + process.stdout.write(` ✓ Removed instructions: ${path.relative(rootDir, instructionsFile)}` + EOL) + } catch { + // No instructions file, that's fine + } + // altimate_change end + + // altimate_change start — telemetry + try { + Telemetry.track({ + type: "kit_removed", + timestamp: Date.now(), + session_id: Telemetry.getContext().sessionId || "", + kit_name: name, + source: "cli", + }) + } catch {} + // altimate_change end + + process.stdout.write(EOL + `Kit "${name}" removed.` + EOL) + }) + }, +}) + +const KitDetectCommand = cmd({ + command: "detect", + describe: "auto-detect which kits match the current project", + builder: (yargs) => yargs, + async handler() { + await bootstrap(process.cwd(), async () => { + const detected = await Kit.detect() + + if (detected.length === 0) { + process.stdout.write("No matching kits detected for this project." 
+ EOL) + process.stdout.write(EOL + `Browse available kits: altimate-code kit list` + EOL) + return + } + + process.stdout.write(EOL) + process.stdout.write(`Detected ${detected.length} matching kit(s):` + EOL + EOL) + + for (const match of detected) { + process.stdout.write(` ${match.kit.name}` + EOL) + if (match.kit.description) { + process.stdout.write(` ${match.kit.description}` + EOL) + } + if (match.matched && match.matched.length > 0) { + process.stdout.write(` Matched files: ${match.matched.join(", ")}` + EOL) + } + // Show the first detection rule that has a message + const firstRuleWithMessage = match.kit.detect?.find((d) => d.message) + if (firstRuleWithMessage?.message) { + process.stdout.write(` ${firstRuleWithMessage.message}` + EOL) + } + process.stdout.write(EOL) + } + + process.stdout.write(`Activate a kit: altimate-code kit activate ` + EOL) + }) + }, +}) + +// altimate_change start — kit: activate subcommand (merged apply + activate into one command) +const KitActivateCommand = cmd({ + command: "activate ", + describe: "activate a kit — install skills, configure MCP, and enable for this project", + builder: (yargs) => + yargs + .positional("name", { + type: "string", + describe: "name of the kit to activate", + demandOption: true, + }) + .option("yes", { + alias: "y", + type: "boolean", + describe: "skip confirmation prompt", + default: false, + }), + async handler(args) { + const name = args.name as string + await bootstrap(process.cwd(), async () => { + const kit = await Kit.get(name) + if (!kit) { + process.stderr.write(`Error: Kit "${name}" not found. Install it first with: altimate-code kit install ` + EOL) + process.exit(1) + } + + const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory + const tier = kit.tier || "community" + const tierBadge = tier !== "community" ? 
` [${tier}]` : "" + + // Get all skills — from skill_packs if present, otherwise flat skills + const allSkills = (kit.skill_packs && Object.keys(kit.skill_packs).length > 0) + ? Kit.allSkillsFromPacks(kit) + : (kit.skills || []) + const skillCount = allSkills.length + const mcpCount = kit.mcp ? Object.keys(kit.mcp).length : 0 + const pluginCount = Array.isArray(kit.plugins) ? kit.plugins.length : 0 + const hasInstructions = !!kit.instructions && !kit.instructions.startsWith("TODO") + + // --- Preview --- + process.stdout.write(EOL) + process.stdout.write(`Kit: ${kit.name}${tierBadge} (v${kit.version || "0.0.0"})` + EOL) + process.stdout.write(`${kit.description || ""}` + EOL) + process.stdout.write(EOL + "The following changes will be applied:" + EOL + EOL) + + if (skillCount > 0) { + process.stdout.write(` Skills (${skillCount}):` + EOL) + for (const skill of allSkills) { + if (typeof skill === "string") { + process.stdout.write(` + ${skill} (reference existing)` + EOL) + } else { + const selected = skill.select ? skill.select.join(", ") : "all" + process.stdout.write(` + ${skill.source} [${selected}]` + EOL) + } + } + process.stdout.write(EOL) + } + + if (mcpCount > 0) { + process.stdout.write(` MCP Servers (${mcpCount}):` + EOL) + for (const [serverName, serverConfig] of Object.entries(kit.mcp!)) { + const desc = (serverConfig as Record).description || "" + process.stdout.write(` + ${serverName}${desc ? 
` — ${desc}` : ""}` + EOL) + } + process.stdout.write(EOL) + } + + if (hasInstructions) { + process.stdout.write(` Instructions:` + EOL) + process.stdout.write(` + .opencode/instructions/kit-${name}.md` + EOL) + process.stdout.write(EOL) + } + + if (skillCount === 0 && mcpCount === 0 && pluginCount === 0 && !hasInstructions) { + // Still activate (add to active-kits) even if empty — user explicitly asked + await Kit.activate(name) + Kit.invalidate() + process.stdout.write(`Kit "${name}" activated (no changes to apply — kit is empty).` + EOL) + return + } + + // --- Confirmation --- + if (!args.yes) { + process.stdout.write(`Activate this kit? [y/N] `) + const response = await new Promise((resolve) => { + let data = "" + const onData = (chunk: Buffer) => { + data += chunk.toString() + if (data.includes("\n")) { + process.stdin.removeListener("data", onData) + process.stdin.pause() + resolve(data.trim().toLowerCase()) + } + } + const onEnd = () => { + process.stdin.removeListener("data", onData) + resolve(data.trim().toLowerCase()) + } + process.stdin.resume() + process.stdin.on("data", onData) + process.stdin.on("end", onEnd) + }) + + if (response !== "y" && response !== "yes") { + process.stdout.write(`Cancelled.` + EOL) + return + } + } + + process.stdout.write(EOL) + + // altimate_change start — kit: track skill install failures for accurate status message + let skillFailures = 0 + // altimate_change end + + // --- 1. 
Install skills --- + if (skillCount > 0) { + for (const skill of allSkills) { + if (typeof skill === "string") { + const existing = await Skill.get(skill) + if (!existing) { + process.stdout.write(` ⚠ Skill "${skill}" not found — install it separately` + EOL) + } else { + process.stdout.write(` ✓ Skill "${skill}" already available` + EOL) + } + } else { + let fetchDir: string + let cloned = false + try { + const result = await cloneSource(skill.source) + fetchDir = result.dir + cloned = result.cloned + } catch (err) { + process.stdout.write(` ✗ Failed to fetch ${skill.source}: ${(err as Error).message}` + EOL) + skillFailures++ + continue + } + + const { Glob: BunGlob } = globalThis.Bun + const glob = new BunGlob("**/SKILL.md") + const skillMatches: string[] = [] + for await (const match of glob.scan({ cwd: fetchDir, absolute: true })) { + if (!match.includes("/.git/")) skillMatches.push(match) + } + + const targetSkillsDir = path.join(rootDir, ".opencode", "skills") + + for (const skillFile of skillMatches) { + const skillParent = path.dirname(skillFile) + const skillName = path.basename(skillParent) + + if (skill.select && !skill.select.includes(skillName)) continue + + const dest = path.join(targetSkillsDir, skillName) + try { + await fs.access(dest) + process.stdout.write(` ⚠ Skill "${skillName}" already exists, skipping` + EOL) + continue + } catch { /* good */ } + + await fs.mkdir(dest, { recursive: true }) + const files = await fs.readdir(skillParent) + for (const file of files) { + const src = path.join(skillParent, file) + const dst = path.join(dest, file) + const stat = await fs.lstat(src) + if (stat.isSymbolicLink()) continue + if (stat.isFile()) { + await fs.copyFile(src, dst) + } else if (stat.isDirectory()) { + await fs.cp(src, dst, { recursive: true, dereference: false }) + } + } + process.stdout.write(` ✓ Installed skill "${skillName}"` + EOL) + } + + await cleanupTmp(fetchDir, cloned) + } + } + } + + // --- 2. 
Configure MCP servers and plugins (JSONC-aware, preserves comments) --- + if (mcpCount > 0 || pluginCount > 0) { + const { filePath } = await findConfigFile(rootDir) + const missingEnvKeys: string[] = [] + + if (mcpCount > 0) { + for (const [serverName, serverDef] of Object.entries(kit.mcp!)) { + const def = serverDef as Record + const kitType = (def.type as string) || "stdio" + let configEntry: Record + + if (kitType === "sse" || kitType === "streamable-http" || kitType === "remote") { + configEntry = { type: "remote", url: def.url as string, ...(def.headers ? { headers: def.headers } : {}) } + } else { + const command = [...((def.command as string[]) || []), ...((def.args as string[]) || [])] + configEntry = { type: "local", command, ...(def.env ? { environment: def.env } : {}) } + } + + // Write each MCP server using JSONC-preserving modify + await writeConfigField(filePath, ["mcp", serverName], configEntry) + process.stdout.write(` ✓ Configured MCP server "${serverName}"` + EOL) + + const envKeys = def.env_keys + if (Array.isArray(envKeys)) { + for (const key of envKeys as string[]) { + if (!process.env[key]) missingEnvKeys.push(key) + } + } + } + } + + if (pluginCount > 0) { + // Read current plugins, add new ones, write back + const { config } = await findConfigFile(rootDir) + const plugins = (config.plugin ?? []) as string[] + let changed = false + for (const plugin of kit.plugins!) 
{ + if (!plugins.includes(plugin)) { + plugins.push(plugin) + changed = true + process.stdout.write(` ✓ Added plugin "${plugin}"` + EOL) + } + } + if (changed) { + await writeConfigField(filePath, ["plugin"], plugins) + } + } + + process.stdout.write(` ✓ Updated config: ${path.relative(rootDir, filePath)}` + EOL) + + if (missingEnvKeys.length > 0) { + process.stdout.write(EOL) + process.stdout.write(` ⚠ Missing environment variables:` + EOL) + for (const key of missingEnvKeys) { + process.stdout.write(` - ${key}` + EOL) + } + process.stdout.write(` Set them in your shell profile or .env file.` + EOL) + } + } + + // --- 3. Add instructions --- + if (hasInstructions) { + const instructionsDir = path.join(rootDir, ".opencode", "instructions") + const instructionsFile = path.join(instructionsDir, `kit-${name}.md`) + await fs.mkdir(instructionsDir, { recursive: true }) + await fs.writeFile(instructionsFile, kit.instructions!, "utf-8") + process.stdout.write(` ✓ Created instructions: ${path.relative(rootDir, instructionsFile)}` + EOL) + } + + // --- 4. 
Activate (add to active-kits) --- + await Kit.activate(name) + Kit.invalidate() + + process.stdout.write(EOL) + // altimate_change start — kit: report partial failures in activation message + if (skillFailures > 0) { + process.stdout.write(`Kit "${name}" activated with ${skillFailures} skill source(s) unavailable.` + EOL) + process.stdout.write(`Run 'altimate-code kit show ${name}' to see expected skills.` + EOL) + } else { + process.stdout.write(`Kit "${name}" activated successfully.` + EOL) + } + // altimate_change end + + try { + Telemetry.track({ + type: "kit_applied", + timestamp: Date.now(), + session_id: Telemetry.getContext().sessionId || "", + kit_name: name, + skill_count: skillCount, + mcp_count: mcpCount, + plugin_count: pluginCount, + has_instructions: hasInstructions, + source: "cli", + }) + } catch {} + }) + }, +}) +// altimate_change end + +// altimate_change start — kit: deactivate subcommand +const KitDeactivateCommand = cmd({ + command: "deactivate ", + describe: "deactivate a kit for the current project", + builder: (yargs) => + yargs.positional("name", { + type: "string", + describe: "name of the kit to deactivate", + demandOption: true, + }), + async handler(args) { + const name = args.name as string + await bootstrap(process.cwd(), async () => { + // Read kit BEFORE deactivating so we know what MCP servers to clean + const kit = await Kit.get(name) + + await Kit.deactivate(name) + process.stdout.write(`✓ Deactivated kit: ${name}` + EOL) + + const rootDir = Instance.worktree !== "/" ? 
Instance.worktree : Instance.directory + + // altimate_change start — kit: clean up instruction file on deactivate + const instructionsFile = path.join(rootDir, ".opencode", "instructions", `kit-${name}.md`) + try { + await fs.access(instructionsFile) + await fs.rm(instructionsFile, { force: true }) + process.stdout.write(` ✓ Removed instructions: ${path.relative(rootDir, instructionsFile)}` + EOL) + } catch {} + // altimate_change end + + // altimate_change start — kit: clean up MCP config entries added by this kit (JSONC-preserving) + if (kit?.mcp && Object.keys(kit.mcp).length > 0) { + try { + const { filePath } = await findConfigFile(rootDir) + let removed = 0 + for (const serverName of Object.keys(kit.mcp)) { + if (await removeConfigField(filePath, ["mcp", serverName])) { + removed++ + } + } + if (removed > 0) { + process.stdout.write(` ✓ Removed ${removed} MCP server(s) from config` + EOL) + } + } catch {} + } + // altimate_change end + }) + }, +}) +// altimate_change end + +// altimate_change start — kit: search subcommand +const REGISTRY_URL = "https://raw.githubusercontent.com/AltimateAI/data-engineering-skills/main/registry.json" + +const KitSearchCommand = cmd({ + command: "search [query]", + describe: "search the kit registry", + builder: (yargs) => + yargs + .positional("query", { + type: "string", + describe: "search query (matches name, description, tags)", + }) + .option("json", { + type: "boolean", + describe: "output as JSON", + default: false, + }), + async handler(args) { + const query = ((args.query as string) || "").toLowerCase().trim() + + await bootstrap(process.cwd(), async () => { + process.stdout.write(`Searching kit registry...` + EOL) + + // altimate_change start — kit: graceful 404 + timeout for registry fetch + let registry: any + const controller = new AbortController() + const timeout = setTimeout(() => controller.abort(), 5000) + try { + const response = await fetch(REGISTRY_URL, { signal: controller.signal }) + 
clearTimeout(timeout) + if (!response.ok) { + if (response.status === 404) { + process.stdout.write(`Kit registry not available yet.` + EOL) + process.stdout.write(EOL + `Browse local kits: altimate-code kit list` + EOL) + process.stdout.write(`Create your own: altimate-code kit create ` + EOL) + return + } + process.stderr.write(`Error: Failed to fetch registry (${response.status})` + EOL) + process.exit(1) + } + registry = await response.json() + } catch (err) { + clearTimeout(timeout) + if ((err as Error).name === "AbortError") { + process.stdout.write(`Kit registry unavailable (timeout).` + EOL) + } else { + process.stderr.write(`Error: Failed to fetch registry: ${(err as Error).message}` + EOL) + } + process.stdout.write(EOL + `Browse local kits: altimate-code kit list` + EOL) + process.exit(1) + } + // altimate_change end + + const kits = (registry.kits || []) as Array<{ + name: string + description: string + version: string + author: string + tier: string + repo: string + path: string + tags: string[] + detect: string[] + stats?: { installs?: number; last_updated?: string } + }> + + // Filter by query + const results = query + ? kits.filter((r) => { + const searchable = [r.name, r.description, ...(r.tags || []), r.author || ""].join(" ").toLowerCase() + return searchable.includes(query) + }) + : kits + + if (args.json) { + process.stdout.write(JSON.stringify(results, null, 2) + EOL) + return + } + + if (results.length === 0) { + process.stdout.write(`No kits found${query ? 
` matching "${query}"` : ""}.` + EOL) + return + } + + // Table output + const nameWidth = Math.max(6, ...results.map((r) => r.name.length)) + const tierWidth = 10 + + const header = `${"KIT".padEnd(nameWidth)} ${"TIER".padEnd(tierWidth)} DESCRIPTION` + const separator = "─".repeat(header.length) + + process.stdout.write(EOL) + process.stdout.write(header + EOL) + process.stdout.write(separator + EOL) + + for (const kit of results) { + let desc = kit.description || "" + if (desc.length > 50) { + desc = desc.slice(0, 50) + const lastSpace = desc.lastIndexOf(" ") + if (lastSpace > 30) desc = desc.slice(0, lastSpace) + desc += "..." + } + + const tier = kit.tier || "community" + process.stdout.write(`${kit.name.padEnd(nameWidth)} ${tier.padEnd(tierWidth)} ${desc}` + EOL) + } + + process.stdout.write(EOL) + process.stdout.write(`${results.length} kit(s) found in registry.` + EOL) + process.stdout.write(`Install with: altimate-code kit install ` + EOL) + }) + }, +}) +// altimate_change end + +// altimate_change start — kit: status subcommand +const KitStatusCommand = cmd({ + command: "status", + describe: "show active kits for the current project", + builder: (yargs) => yargs, + async handler() { + await bootstrap(process.cwd(), async () => { + const activeKits = await Kit.active() + + if (activeKits.length === 0) { + process.stdout.write("No active kits for this project." + EOL) + process.stdout.write(EOL + `Activate one: altimate-code kit activate ` + EOL) + process.stdout.write(`Auto-detect: altimate-code kit detect` + EOL) + return + } + + process.stdout.write(EOL) + process.stdout.write(`Active kits (${activeKits.length}):` + EOL + EOL) + + for (const kit of activeKits) { + const tier = kit.tier || "community" + const tierBadge = tier !== "community" ? 
` [${tier}]` : "" + process.stdout.write(` ${kit.name}${tierBadge}` + EOL) + if (kit.description) { + process.stdout.write(` ${kit.description}` + EOL) + } + + // Show skill packs if any + if (kit.skill_packs && Object.keys(kit.skill_packs).length > 0) { + for (const [packName, pack] of Object.entries(kit.skill_packs)) { + const badge = pack.activation === "always" ? "●" : pack.activation === "detect" ? "◐" : "○" + process.stdout.write(` ${badge} ${packName} (${pack.activation}, ${pack.skills.length} skills)` + EOL) + } + } + + process.stdout.write(EOL) + } + }) + }, +}) +// altimate_change end + +// altimate_change start — kit: validate subcommand +const KitValidateCommand = cmd({ + command: "validate [name]", + describe: "validate a kit's YAML format and references", + builder: (yargs) => + yargs.positional("name", { + type: "string", + describe: "name of the kit to validate (defaults to all)", + }), + async handler(args) { + const targetName = args.name as string | undefined + await bootstrap(process.cwd(), async () => { + const kits = targetName ? [await Kit.get(targetName)].filter(Boolean) : await Kit.all() + + if (kits.length === 0) { + if (targetName) { + process.stderr.write(`Error: Kit "${targetName}" not found.` + EOL) + process.exit(1) + } + process.stdout.write("No kits to validate." + EOL) + return + } + + let hasErrors = false + const pass = (msg: string) => process.stdout.write(` ✓ ${msg}` + EOL) + const fail = (msg: string) => { process.stdout.write(` ✗ ${msg}` + EOL); hasErrors = true } + const warn = (msg: string) => process.stdout.write(` ⚠ ${msg}` + EOL) + + for (const kit of kits as Kit.Info[]) { + process.stdout.write(EOL + `Validating: ${kit.name}` + EOL + EOL) + + // 1. Name format + if (/^[a-z][a-z0-9]*(-[a-z0-9]+)*$/.test(kit.name)) { + pass(`Name "${kit.name}" is valid`) + } else { + fail(`Name "${kit.name}" has invalid format (must be lowercase, hyphens, 2+ chars)`) + } + + // 2. 
Description + if (kit.description && !kit.description.startsWith("TODO")) { + pass(`Description present`) + } else { + warn(`Description is missing or starts with TODO`) + } + + // 3. Version + if (kit.version && /^\d+\.\d+\.\d+/.test(kit.version)) { + pass(`Version "${kit.version}" is valid semver`) + } else { + warn(`Version "${kit.version || "(none)"}" may not be valid semver`) + } + + // 4. Skills references + const allSkills = (kit.skill_packs && Object.keys(kit.skill_packs).length > 0) + ? Kit.allSkillsFromPacks(kit) + : (kit.skills || []) + if (allSkills.length > 0) { + pass(`${allSkills.length} skill source(s) defined`) + for (const skill of allSkills) { + if (typeof skill === "string") { + pass(` Skill reference: "${skill}"`) + } else { + if (!skill.source) { + fail(` Skill source is empty`) + } else { + pass(` Skill source: "${skill.source}"${skill.select ? ` [${skill.select.join(", ")}]` : ""}`) + } + } + } + } else { + warn(`No skills defined`) + } + + // 5. MCP servers + if (kit.mcp && Object.keys(kit.mcp).length > 0) { + for (const [name, config] of Object.entries(kit.mcp)) { + const cfg = config as Record + const type = (cfg.type as string) || "stdio" + if (type === "stdio" || type === "local") { + if (cfg.command && Array.isArray(cfg.command) && (cfg.command as string[]).length > 0) { + pass(`MCP "${name}": command defined`) + } else { + fail(`MCP "${name}": missing command for stdio server`) + } + } else if (type === "sse" || type === "streamable-http" || type === "remote") { + if (cfg.url) { + pass(`MCP "${name}": URL defined`) + } else { + fail(`MCP "${name}": missing url for remote server`) + } + } + + // Check env_keys + if (Array.isArray(cfg.env_keys)) { + for (const key of cfg.env_keys as string[]) { + if (process.env[key]) { + pass(`MCP "${name}": env var ${key} is set`) + } else { + warn(`MCP "${name}": env var ${key} is NOT set`) + } + } + } + } + } + + // 6. 
Detection rules + if (kit.detect && kit.detect.length > 0) { + pass(`${kit.detect.length} detection rule(s) defined`) + } else { + warn(`No detection rules — kit won't appear in 'kit detect'`) + } + + // 7. Instructions + if (kit.instructions && !kit.instructions.startsWith("TODO")) { + pass(`Instructions present (${kit.instructions.split("\n").length} lines)`) + } else { + warn(`Instructions missing or placeholder`) + } + } + + process.stdout.write(EOL) + if (hasErrors) { + process.stdout.write(`Validation: FAIL — fix the issues above` + EOL) + process.exitCode = 1 + } else { + process.stdout.write(`Validation: PASS` + EOL) + } + }) + }, +}) +// altimate_change end + +// --------------------------------------------------------------------------- +// Top-level kit command +// --------------------------------------------------------------------------- + +export const KitCommand = cmd({ + command: "kit", + describe: "manage kits — bundles of skills, MCP servers, and plugins", + builder: (yargs) => + yargs + .command(KitListCommand) + .command(KitCreateCommand) + .command(KitShowCommand) + .command(KitInstallCommand) + .command(KitRemoveCommand) + .command(KitDetectCommand) + // altimate_change start — kit: register new subcommands + .command(KitActivateCommand) + .command(KitDeactivateCommand) + .command(KitSearchCommand) + .command(KitStatusCommand) + .command(KitValidateCommand) + // altimate_change end + .demandCommand(), + async handler() {}, +}) +// altimate_change end diff --git a/packages/opencode/src/cli/cmd/tui/thread.ts b/packages/opencode/src/cli/cmd/tui/thread.ts index 1fa1540fd8..20efe06ec8 100644 --- a/packages/opencode/src/cli/cmd/tui/thread.ts +++ b/packages/opencode/src/cli/cmd/tui/thread.ts @@ -14,6 +14,10 @@ import type { EventSource } from "./context/sdk" import { win32DisableProcessedInput, win32InstallCtrlCGuard } from "./win32" import { TuiConfig } from "@/config/tui" import { Instance } from "@/project/instance" +// altimate_change start — 
kit: import Kit for startup detection nudge +import { Kit } from "@/kit/kit" +import { EOL } from "os" +// altimate_change end declare global { const OPENCODE_WORKER_PATH: string @@ -173,6 +177,27 @@ export const TuiThreadCommand = cmd({ fn: () => TuiConfig.get(), }) + // altimate_change start — kit: non-blocking kit detection nudge on TUI startup + Instance.provide({ + directory: cwd, + fn: async () => { + try { + const activeKits = await Kit.active() + if (activeKits.length > 0) return // already has active kits, no nudge needed + const detected = await Kit.detect() + if (detected.length > 0) { + const first = detected[0] + process.stderr.write( + `\x1b[2m\u{1F4A1} Kit available: ${first.kit.name} \u2014 run /kit activate ${first.kit.name}\x1b[0m` + EOL, + ) + } + } catch { + // Kit detection is best-effort; never block startup + } + }, + }).catch(() => {}) + // altimate_change end + const network = await resolveNetworkOptions(args) const external = process.argv.includes("--port") || diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index a19a18379c..52c3b81521 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -1076,6 +1076,9 @@ export namespace Config { .optional() .describe("Command configuration, see https://altimate.ai/docs/commands"), skills: Skills.optional().describe("Additional skill folder paths"), + // altimate_change start — kit: config schema for kit paths and URLs + kits: Skills.optional().describe("Additional kit folder paths and URLs (same shape as skills config)"), + // altimate_change end watcher: z .object({ ignore: z.array(z.string()).optional(), diff --git a/packages/opencode/src/index.ts b/packages/opencode/src/index.ts index 9a32dd9670..5772514d48 100644 --- a/packages/opencode/src/index.ts +++ b/packages/opencode/src/index.ts @@ -36,6 +36,9 @@ import { TraceCommand } from "./cli/cmd/trace" // altimate_change start — top-level skill command import { 
SkillCommand } from "./cli/cmd/skill" // altimate_change end +// altimate_change start — kit: top-level kit command +import { KitCommand } from "./cli/cmd/kit" +// altimate_change end // altimate_change start — check: deterministic SQL check command import { CheckCommand } from "./cli/cmd/check" // altimate_change end @@ -208,6 +211,9 @@ let cli = yargs(hideBin(process.argv)) // altimate_change start — top-level skill command .command(SkillCommand) // altimate_change end + // altimate_change start — kit: register kit command + .command(KitCommand) + // altimate_change end // altimate_change start — check: register deterministic SQL check command .command(CheckCommand) // altimate_change end diff --git a/packages/opencode/src/kit/index.ts b/packages/opencode/src/kit/index.ts new file mode 100644 index 0000000000..010ec61362 --- /dev/null +++ b/packages/opencode/src/kit/index.ts @@ -0,0 +1,3 @@ +// altimate_change start — kit: re-export kit module +export { Kit } from "./kit" +// altimate_change end diff --git a/packages/opencode/src/kit/kit.ts b/packages/opencode/src/kit/kit.ts new file mode 100644 index 0000000000..efd7cd9243 --- /dev/null +++ b/packages/opencode/src/kit/kit.ts @@ -0,0 +1,430 @@ +// altimate_change start — kit: core kit module for bundling skills + MCP + plugins + instructions +import z from "zod" +import path from "path" +import { mkdir, writeFile, unlink } from "fs/promises" +import matter from "gray-matter" +import { Config } from "../config/config" +import { Instance } from "../project/instance" +import { State } from "../project/state" +import { Log } from "../util/log" +import { Global } from "@/global" +import { Filesystem } from "@/util/filesystem" +import { Glob } from "../util/glob" + +export namespace Kit { + const log = Log.create({ service: "kit" }) + + // Kit YAML schema - this is what goes in KIT.yaml frontmatter or body + export const McpConfig = z.object({ + // Kit uses user-friendly names: "stdio" → mapped to "local", 
"sse"/"streamable-http" → mapped to "remote" + type: z.enum(["stdio", "sse", "streamable-http", "local", "remote"]).default("stdio"), + command: z.array(z.string()).optional(), + args: z.array(z.string()).optional(), + url: z.string().optional(), + env: z.record(z.string(), z.string()).optional(), + env_keys: z + .array(z.string()) + .optional() + .describe("Env var names that must be set by the user"), + description: z.string().optional(), + }) + + // altimate_change start — kit: trust tier enum for kit provenance + export const Tier = z + .string() + .transform((v) => v?.toLowerCase()) + .pipe(z.enum(["built-in", "verified", "community", "archived"])) + .default("community") + export type Tier = z.infer + // altimate_change end + + // altimate_change start — kit: skill pack schema for grouped skill activation + export const SkillPack = z.object({ + description: z.string().optional(), + skills: z + .array( + z.union([ + z.string(), + z.object({ + source: z.string(), + select: z.array(z.string()).optional(), + }), + ]), + ) + .default([]), + activation: z.enum(["always", "detect", "manual", "deferred"]).default("always"), + detect: z + .array( + z.object({ + files: z.array(z.string()), + }), + ) + .nullable() + .optional() + .transform((v) => v ?? []) + .default([]), + }) + export type SkillPack = z.infer + // altimate_change end + + export const Info = z.object({ + name: z.string(), + description: z.string(), + version: z.string().optional().default("1.0.0"), + author: z.string().optional(), + location: z.string(), // filesystem path where the kit was loaded from + + // altimate_change start — kit: trust tier field + // Trust tier + tier: Tier.nullable().optional().transform((v) => v ?? 
"community").default("community"), + // altimate_change end + + // altimate_change start — kit: skill packs with activation modes + // Skill packs — organized groups of skills with activation modes + // When present, takes precedence over flat `skills` array + skill_packs: z + .record(z.string(), SkillPack) + .nullable() + .optional() + .transform((v) => v ?? {}) + .default({}), + // altimate_change end + + // What the kit bundles + // Note: YAML parses `key: []` with trailing comments as null, so we accept nullable + skills: z + .array( + z.union([ + z.string(), // skill name (already installed) + z.object({ + source: z + .string() + .describe("GitHub repo (owner/repo) or URL to fetch skills from"), + select: z + .array(z.string()) + .optional() + .describe("Specific skill names to install from source"), + }), + ]), + ) + .nullable() + .optional() + .transform((v) => v ?? []) + .default([]), + + mcp: z + .record(z.string(), McpConfig) + .nullable() + .optional() + .transform((v) => v ?? {}) + .default({}), + + plugins: z + .array(z.string()) + .nullable() + .optional() + .transform((v) => v ?? []) + .default([]) + .describe("npm package specs, e.g. @dagster/altimate-plugin@^1.0"), + + instructions: z + .string() + .nullable() + .optional() + .transform((v) => v ?? undefined) + .describe("Additional system instructions added to every conversation"), + + // Auto-detection: when to suggest this kit + detect: z + .array( + z.object({ + files: z + .array(z.string()) + .describe("Glob patterns that indicate this kit is relevant"), + message: z + .string() + .optional() + .describe("Custom suggestion message"), + }), + ) + .nullable() + .optional() + .transform((v) => v ?? []) + .default([]), + + // The full markdown content (instructions, docs, etc.) + content: z.string().nullable().optional().transform((v) => v ?? 
"").default(""), + }) + export type Info = z.infer + + // --- State management (mirrors Skill.state pattern) --- + + const KIT_FILE_PATTERN = "KIT.{yaml,yml,md}" + + const stateInit: () => Promise<{ + kits: Record + dirs: string[] + }> = async () => { + const kits: Record = {} + const dirs = new Set() + const config = await Config.get() + + // 1. Scan .opencode/kits/ and .altimate-code/kits/ directories + for (const dir of await Config.directories()) { + const matches = await Glob.scan(`{kit,kits}/**/${KIT_FILE_PATTERN}`, { + cwd: dir, + absolute: true, + dot: true, + symlink: true, + }) + for (const item of matches) { + const kit = await loadKit(item) + if (kit) { + kits[kit.name] = kit + dirs.add(path.dirname(item)) + } + } + } + + // 2. Load from config paths + if (config.kits?.paths) { + for (let p of config.kits.paths) { + if (p.startsWith("~/")) p = path.join(Global.Path.home, p.slice(2)) + if (!path.isAbsolute(p)) p = path.resolve(Instance.directory, p) + + const stat = Filesystem.stat(p) + if (!stat) continue + + if (stat.isDirectory()) { + const matches = await Glob.scan(KIT_FILE_PATTERN, { + cwd: p, + absolute: true, + dot: true, + symlink: true, + }) + for (const item of matches) { + const kit = await loadKit(item) + if (kit) { + kits[kit.name] = kit + dirs.add(p) + } + } + } else { + const kit = await loadKit(p) + if (kit) { + kits[kit.name] = kit + dirs.add(path.dirname(p)) + } + } + } + } + + // 3. 
Load from installed kits directory + const installedDir = path.join(Global.Path.data, "kits") + if (await Filesystem.exists(installedDir)) { + const matches = await Glob.scan(KIT_FILE_PATTERN, { + cwd: installedDir, + absolute: true, + dot: true, + symlink: true, + }) + for (const item of matches) { + const kit = await loadKit(item) + if (kit) { + kits[kit.name] = kit + dirs.add(installedDir) + } + } + } + + return { kits, dirs: Array.from(dirs) } + } + + export const state = Instance.state(stateInit) + + export function invalidate() { + State.invalidate(Instance.directory, stateInit) + } + + // --- Loading --- + + async function loadKit(filePath: string): Promise { + try { + const raw = await Filesystem.readText(filePath) + if (!raw) return undefined + + const ext = path.extname(filePath).toLowerCase() + let data: Record = {} + let content = "" + + if (ext === ".md") { + // Markdown with YAML frontmatter + const parsed = matter(raw) + data = parsed.data + content = parsed.content.trim() + } else { + // YAML file - parse the whole thing via gray-matter + const parsed = matter("---\n" + raw + "\n---") + data = parsed.data + content = (data.content as string) || "" + delete data.content + } + + const result = Info.safeParse({ + ...data, + location: filePath, + content, + }) + + if (!result.success) { + log.warn("invalid kit", { + path: filePath, + issues: result.error.issues, + }) + return undefined + } + + // Validate name to prevent path traversal + if (result.data.name && !/^[a-z][a-z0-9]*(-[a-z0-9]+)*$/.test(result.data.name)) { + log.warn("invalid kit name", { path: filePath, name: result.data.name }) + return undefined + } + + return result.data + } catch (err) { + log.error("failed to load kit", { path: filePath, err }) + return undefined + } + } + + // --- Public API --- + + export async function get(name: string): Promise { + return state().then((s) => s.kits[name]) + } + + export async function all(): Promise { + return state().then((s) => 
Object.values(s.kits)) + } + + export async function dirs(): Promise { + return state().then((s) => s.dirs) + } + + // --- Detection --- + + /** Check which installed kits match the current project */ + export async function detect(): Promise< + Array<{ kit: Info; matched: string[] }> + > { + const kits = await all() + const results: Array<{ kit: Info; matched: string[] }> = [] + + for (const kit of kits) { + if (!kit.detect || kit.detect.length === 0) continue + + const matchedFiles: string[] = [] + for (const rule of kit.detect) { + for (const pattern of rule.files) { + const matches = await Glob.scan(pattern, { + cwd: Instance.directory, + absolute: false, + dot: true, + symlink: true, + }) + if (matches.length > 0) { + matchedFiles.push(...matches.slice(0, 3)) // limit to 3 examples + } + } + } + + if (matchedFiles.length > 0) { + results.push({ kit, matched: [...new Set(matchedFiles)] }) + } + } + + return results + } + + // altimate_change start — kit: active kit management and context scoping + /** Get active kits for the current project (reads .opencode/active-kits) */ + export async function active(): Promise { + const activeFile = await findActiveKitsFile() + if (!activeFile) return [] + + try { + const raw = await Filesystem.readText(activeFile) + if (!raw) return [] + const names = raw.split("\n").map((l) => l.trim()).filter(Boolean) + const all = await state().then((s) => s.kits) + return names.map((n) => all[n]).filter((r): r is Info => !!r) + } catch { + return [] + } + } + + /** Activate a kit for the current project */ + export async function activate(name: string): Promise { + const rootDir = Instance.worktree !== "/" ? 
Instance.worktree : Instance.directory + const activeFile = path.join(rootDir, ".opencode", "active-kits") + + let names: string[] = [] + try { + const raw = await Filesystem.readText(activeFile) + if (raw) names = raw.split("\n").map((l) => l.trim()).filter(Boolean) + } catch {} + + if (!names.includes(name)) { + names.push(name) + } + + await mkdir(path.dirname(activeFile), { recursive: true }) + await writeFile(activeFile, names.join("\n") + "\n", "utf-8") + } + + /** Deactivate a kit for the current project */ + export async function deactivate(name: string): Promise { + const rootDir = Instance.worktree !== "/" ? Instance.worktree : Instance.directory + const activeFile = path.join(rootDir, ".opencode", "active-kits") + + let names: string[] = [] + try { + const raw = await Filesystem.readText(activeFile) + if (raw) names = raw.split("\n").map((l) => l.trim()).filter(Boolean) + } catch { return } + + names = names.filter((n) => n !== name) + + if (names.length === 0) { + try { await unlink(activeFile) } catch {} + } else { + await writeFile(activeFile, names.join("\n") + "\n", "utf-8") + } + } + + async function findActiveKitsFile(): Promise { + const rootDir = Instance.worktree !== "/" ? 
Instance.worktree : Instance.directory + const candidates = [ + path.join(rootDir, ".opencode", "active-kits"), + path.join(rootDir, ".altimate-code", "active-kits"), + ] + for (const f of candidates) { + if (await Filesystem.exists(f)) return f + } + return undefined + } + + /** Get all skills referenced by a kit's skill_packs */ + export function allSkillsFromPacks(kit: Info): Array { + if (!kit.skill_packs || Object.keys(kit.skill_packs).length === 0) { + return kit.skills + } + const result: Array = [] + for (const [, pack] of Object.entries(kit.skill_packs)) { + result.push(...pack.skills) + } + return result + } + // altimate_change end +} +// altimate_change end From 6e1b0e15b655d7c209eaa56a973d895503f26f97 Mon Sep 17 00:00:00 2001 From: anandgupta42 Date: Sat, 28 Mar 2026 22:36:45 -0700 Subject: [PATCH 2/2] =?UTF-8?q?feat:=20add=20Kit=20system=20=E2=80=94=20sh?= =?UTF-8?q?areable=20bundles=20of=20skills,=20MCP=20servers,=20and=20instr?= =?UTF-8?q?uctions?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduces the Kit extension system that enables anyone — vendors, solution architects, team leads, individual engineers — to create and distribute shareable development setups. 
## What's included **Core runtime** (`packages/opencode/src/kit/`): - `Kit` namespace with Zod schemas, state management, YAML loading - Trust tiers (`built-in`, `verified`, `community`) - Skill packs with activation modes (`always`, `detect`, `manual`) - Activate/deactivate lifecycle with full cleanup **11 CLI commands** (`packages/opencode/src/cli/cmd/kit.ts`): - `kit list`, `kit create`, `kit show`, `kit install`, `kit remove` - `kit activate` — one command: installs skills, configures MCP, enables - `kit deactivate` — clean removal (instructions + MCP config + active-kits) - `kit detect`, `kit search`, `kit status`, `kit validate` **TUI startup nudge** (`packages/opencode/src/cli/cmd/tui/thread.ts`): - Non-blocking detection on TUI startup - Shows one-line suggestion when matching kits found **JSONC-preserving config writes**: - Uses `jsonc-parser` `modify`/`applyEdits` to preserve user comments - MCP servers added on activate, removed on deactivate **Documentation** (`docs/`): - User guide: `docs/docs/configure/kits.md` (CLI reference, locations, tiers) - Author guide: `docs/docs/develop/kits.md` (full schema, tutorial, examples) - Ecosystem plan: `docs/PARTNER_ECOSYSTEM_PLAN.md` (strategy + simulation results) - Roadmap with planned features (`kit switch`, inheritance, `kit enforce`) ## Testing - 60/60 automated E2E tests passing (name validation, activate/deactivate lifecycle, MCP merge, JSONC preservation, detect, validate, install) - 10 stakeholder simulations across 5 scenarios (Snowflake, Dagster, dbt Labs, Airbyte, Healthcare, MSP consulting, OSS contributor, self-serve, enterprise) - 29 bugs found and fixed across 3 review rounds ## External - Kit content lives in `AltimateAI/data-engineering-skills` (merged PR #9) - Registry at `data-engineering-skills/registry.json` with 1 real entry - `dbt-snowflake` kit: 9 skills + dbt MCP server Co-Authored-By: Claude Opus 4.6 (1M context) --- packages/opencode/src/cli/cmd/tui/thread.ts | 37 ++++++++++----------- 
1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/packages/opencode/src/cli/cmd/tui/thread.ts b/packages/opencode/src/cli/cmd/tui/thread.ts index 20efe06ec8..099157ae42 100644 --- a/packages/opencode/src/cli/cmd/tui/thread.ts +++ b/packages/opencode/src/cli/cmd/tui/thread.ts @@ -14,8 +14,7 @@ import type { EventSource } from "./context/sdk" import { win32DisableProcessedInput, win32InstallCtrlCGuard } from "./win32" import { TuiConfig } from "@/config/tui" import { Instance } from "@/project/instance" -// altimate_change start — kit: import Kit for startup detection nudge -import { Kit } from "@/kit/kit" +// altimate_change start — kit: Kit imported dynamically in setTimeout below to avoid test mock issues import { EOL } from "os" // altimate_change end @@ -178,24 +177,24 @@ export const TuiThreadCommand = cmd({ }) // altimate_change start — kit: non-blocking kit detection nudge on TUI startup - Instance.provide({ - directory: cwd, - fn: async () => { - try { - const activeKits = await Kit.active() - if (activeKits.length > 0) return // already has active kits, no nudge needed - const detected = await Kit.detect() - if (detected.length > 0) { - const first = detected[0] - process.stderr.write( - `\x1b[2m\u{1F4A1} Kit available: ${first.kit.name} \u2014 run /kit activate ${first.kit.name}\x1b[0m` + EOL, - ) - } - } catch { - // Kit detection is best-effort; never block startup + // Deferred to avoid interfering with TUI initialization and test mocks. + // Uses setTimeout + dynamic import so Kit module is not required at parse time. 
+ setTimeout(async () => { + try { + const { Kit } = await import("../../../kit") + const activeKits = await Kit.active() + if (activeKits.length > 0) return + const detected = await Kit.detect() + if (detected.length > 0) { + const first = detected[0] + process.stderr.write( + `\x1b[2m\u{1F4A1} Kit available: ${first.kit.name} \u2014 run /kit activate ${first.kit.name}\x1b[0m` + EOL, + ) } - }, - }).catch(() => {}) + } catch { + // Kit detection is best-effort; never block startup + } + }, 100) // altimate_change end const network = await resolveNetworkOptions(args)