diff --git a/delivery-process/specs/data-api-cli-ergonomics.feature b/delivery-process/specs/data-api-cli-ergonomics.feature index fd55db4f..c0b1cdb8 100644 --- a/delivery-process/specs/data-api-cli-ergonomics.feature +++ b/delivery-process/specs/data-api-cli-ergonomics.feature @@ -1,6 +1,7 @@ @libar-docs @libar-docs-pattern:DataAPICLIErgonomics -@libar-docs-status:roadmap +@libar-docs-status:completed +@libar-docs-unlock-reason:Final-deliverable-status-update @libar-docs-phase:25d @libar-docs-product-area:DataAPI @libar-docs-effort:2d @@ -35,12 +36,12 @@ Feature: Data API CLI Ergonomics - Performance and Interactive Mode Background: Deliverables Given the following deliverables: | Deliverable | Status | Location | Tests | Test Type | - | MasterDataset cache with mtime invalidation | pending | src/cli/dataset-cache.ts | Yes | unit | - | REPL mode handler | pending | src/cli/repl.ts | Yes | integration | + | MasterDataset cache with mtime invalidation | complete | src/cli/dataset-cache.ts | Yes | unit | + | REPL mode handler | complete | src/cli/repl.ts | Yes | integration | | FSM short-circuit for static queries | complete | src/cli/process-api.ts | Yes | unit | - | Per-subcommand help system | pending | src/cli/process-api.ts | Yes | integration | - | Dry-run mode | pending | src/cli/process-api.ts | Yes | integration | - | Validation summary in metadata | pending | src/cli/process-api.ts | Yes | unit | + | Per-subcommand help system | complete | src/cli/process-api.ts | Yes | integration | + | Dry-run mode | complete | src/cli/process-api.ts | Yes | integration | + | Validation summary in metadata | complete | src/cli/process-api.ts | Yes | unit | # ============================================================================ # RULE 1: Pipeline Caching diff --git a/docs-live/ARCHITECTURE.md b/docs-live/ARCHITECTURE.md index e102a628..bc4e6cb6 100644 --- a/docs-live/ARCHITECTURE.md +++ b/docs-live/ARCHITECTURE.md @@ -7,11 +7,11 @@ ## Overview -This diagram was 
auto-generated from 158 annotated source files across 11 bounded contexts. +This diagram was auto-generated from 160 annotated source files across 11 bounded contexts. | Metric | Count | | ---------------- | ----- | -| Total Components | 158 | +| Total Components | 160 | | Bounded Contexts | 11 | | Component Roles | 5 | @@ -43,6 +43,7 @@ graph TB subgraph cli["Cli BC"] CLIVersionHelper["CLIVersionHelper"] ValidatePatternsCLI["ValidatePatternsCLI"] + ReplMode["ReplMode[service]"] ProcessAPICLIImpl["ProcessAPICLIImpl[service]"] OutputPipelineImpl["OutputPipelineImpl[service]"] LintProcessCLI["LintProcessCLI"] @@ -50,6 +51,7 @@ graph TB TagTaxonomyCLI["TagTaxonomyCLI"] Documentation_Generator_CLI["Documentation Generator CLI"] CLIErrorHandler["CLIErrorHandler"] + DatasetCache["DatasetCache[infrastructure]"] CLISchema["CLISchema"] end subgraph config["Config BC"] @@ -179,7 +181,6 @@ graph TB FSMModule["FSMModule"] end subgraph shared["Shared Infrastructure"] - Convention_Annotation_Example___DD_3_Decision["Convention Annotation Example β€” DD-3 Decision[decider]"] WorkflowConfigSchema["WorkflowConfigSchema"] Tag_Registry_Configuration["Tag Registry Configuration"] OutputSchemas["OutputSchemas"] @@ -190,6 +191,8 @@ graph TB CodecUtils["CodecUtils"] ResultMonadTypes["ResultMonadTypes"] ErrorFactoryTypes["ErrorFactoryTypes"] + DoDValidationTypes["DoDValidationTypes"] + ValidationModule["ValidationModule"] StatusValues["StatusValues"] RiskLevels["RiskLevels"] NormalizedStatus["NormalizedStatus"] @@ -203,8 +206,15 @@ graph TB GeneratorTypes["GeneratorTypes"] SourceMappingValidator["SourceMappingValidator"] GeneratorRegistry["GeneratorRegistry"] + RenderableUtils["RenderableUtils"] + SectionBlock["SectionBlock"] + RenderableDocumentModel_RDM_["RenderableDocumentModel(RDM)"] ShapeExtractor["ShapeExtractor"] LayerInference["LayerInference"] + ProcessStateTypes["ProcessStateTypes"] + StubResolverImpl["StubResolverImpl"] + RulesQueryModule["RulesQueryModule"] + 
APIModule["APIModule"] CLIVersionHelper["CLIVersionHelper"] ValidatePatternsCLI["ValidatePatternsCLI"] LintProcessCLI["LintProcessCLI"] @@ -212,15 +222,8 @@ graph TB TagTaxonomyCLI["TagTaxonomyCLI"] Documentation_Generator_CLI["Documentation Generator CLI"] CLIErrorHandler["CLIErrorHandler"] - ProcessStateTypes["ProcessStateTypes"] - StubResolverImpl["StubResolverImpl"] - RulesQueryModule["RulesQueryModule"] - APIModule["APIModule"] - RenderableUtils["RenderableUtils"] - SectionBlock["SectionBlock"] - RenderableDocumentModel_RDM_["RenderableDocumentModel(RDM)"] - DoDValidationTypes["DoDValidationTypes"] - ValidationModule["ValidationModule"] + Convention_Annotation_Example___DD_3_Decision["Convention Annotation Example β€” DD-3 Decision[decider]"] + FSMModule["FSMModule"] ProcessGuardTypes["ProcessGuardTypes"] ProcessGuardModule["ProcessGuardModule"] DetectChanges["DetectChanges"] @@ -246,7 +249,6 @@ graph TB ClaudeModuleCodec["ClaudeModuleCodec"] BusinessRulesCodec["BusinessRulesCodec"] AdrDocumentCodec["AdrDocumentCodec"] - FSMModule["FSMModule"] CodecBaseOptions["CodecBaseOptions"] ADR006SingleReadModelArchitecture["ADR006SingleReadModelArchitecture"] ADR005CodecBasedMarkdownRendering["ADR005CodecBasedMarkdownRendering"] @@ -264,10 +266,10 @@ graph TB EffortVarianceTracking["EffortVarianceTracking"] ConfigBasedWorkflowDefinition["ConfigBasedWorkflowDefinition"] CliBehaviorTesting["CliBehaviorTesting"] - StringUtils["StringUtils"] ProcessGuardTesting["ProcessGuardTesting"] ResultMonad["ResultMonad"] ErrorFactories["ErrorFactories"] + StringUtils["StringUtils"] SessionHandoffs["SessionHandoffs"] SessionFileLifecycle["SessionFileLifecycle"] KebabCaseSlugs["KebabCaseSlugs"] @@ -278,22 +280,22 @@ graph TB DocDirectiveSchema ..-> MvpWorkflowImplementation ResultMonadTypes ..-> ResultMonad ErrorFactoryTypes ..-> ErrorFactories + DoDValidator --> DoDValidationTypes + DoDValidator --> DualSourceExtractor + AntiPatternDetector --> DoDValidationTypes CategoryDefinition 
..-> CategoryDefinitions - GherkinScanner --> GherkinASTParser - TypeScript_AST_Parser --> DocDirectiveSchema LintModule --> LintRules LintModule --> LintEngine LintEngine --> LintRules LintEngine --> CodecUtils + GherkinScanner --> GherkinASTParser + TypeScript_AST_Parser --> DocDirectiveSchema SourceMapper -.-> DecisionDocCodec SourceMapper -.-> ShapeExtractor SourceMapper -.-> GherkinASTParser GeneratorRegistry --> GeneratorTypes Documentation_Generation_Orchestrator --> Pattern_Scanner - GherkinExtractor --> GherkinASTParser - DualSourceExtractor --> GherkinExtractor - DualSourceExtractor --> GherkinScanner - Document_Extractor --> Pattern_Scanner + SectionBlock ..-> RenderableDocument WorkflowLoader --> WorkflowConfigSchema WorkflowLoader --> CodecUtils ConfigResolver --> ProjectConfigTypes @@ -311,22 +313,10 @@ graph TB DefineConfig --> ProjectConfigTypes ConfigLoader --> DeliveryProcessFactory ConfigLoader --> ConfigurationTypes - ValidatePatternsCLI --> GherkinScanner - ValidatePatternsCLI --> GherkinExtractor - ValidatePatternsCLI --> MasterDataset - ValidatePatternsCLI --> CodecUtils - ProcessAPICLIImpl --> ProcessStateAPI - ProcessAPICLIImpl --> MasterDataset - ProcessAPICLIImpl --> PipelineFactory - ProcessAPICLIImpl --> RulesQueryModule - ProcessAPICLIImpl --> PatternSummarizerImpl - ProcessAPICLIImpl --> FuzzyMatcherImpl - ProcessAPICLIImpl --> OutputPipelineImpl - OutputPipelineImpl --> PatternSummarizerImpl - LintProcessCLI --> ProcessGuardModule - LintPatternsCLI --> LintEngine - LintPatternsCLI --> LintRules - TagTaxonomyCLI --> ConfigLoader + GherkinExtractor --> GherkinASTParser + DualSourceExtractor --> GherkinExtractor + DualSourceExtractor --> GherkinScanner + Document_Extractor --> Pattern_Scanner PatternSummarizerImpl --> ProcessStateAPI StubResolverImpl --> ProcessStateAPI ScopeValidatorImpl --> ProcessStateAPI @@ -349,10 +339,28 @@ graph TB ContextAssemblerImpl --> StubResolverImpl ArchQueriesImpl --> ProcessStateAPI ArchQueriesImpl --> 
MasterDataset - SectionBlock ..-> RenderableDocument - DoDValidator --> DoDValidationTypes - DoDValidator --> DualSourceExtractor - AntiPatternDetector --> DoDValidationTypes + ValidatePatternsCLI --> GherkinScanner + ValidatePatternsCLI --> GherkinExtractor + ValidatePatternsCLI --> MasterDataset + ValidatePatternsCLI --> CodecUtils + ReplMode --> PipelineFactory + ReplMode --> ProcessStateAPI + ProcessAPICLIImpl --> ProcessStateAPI + ProcessAPICLIImpl --> MasterDataset + ProcessAPICLIImpl --> PipelineFactory + ProcessAPICLIImpl --> RulesQueryModule + ProcessAPICLIImpl --> PatternSummarizerImpl + ProcessAPICLIImpl --> FuzzyMatcherImpl + ProcessAPICLIImpl --> OutputPipelineImpl + OutputPipelineImpl --> PatternSummarizerImpl + LintProcessCLI --> ProcessGuardModule + LintPatternsCLI --> LintEngine + LintPatternsCLI --> LintRules + TagTaxonomyCLI --> ConfigLoader + DatasetCache --> PipelineFactory + DatasetCache --> WorkflowConfigSchema + FSMValidator --> FSMTransitions + FSMValidator --> FSMStates DetectChanges --> DeriveProcessState DeriveProcessState --> GherkinScanner DeriveProcessState --> FSMValidator @@ -381,8 +389,6 @@ graph TB DesignReviewCodec --> MasterDataset DesignReviewCodec --> MermaidDiagramUtils ArchitectureCodec --> MasterDataset - FSMValidator --> FSMTransitions - FSMValidator --> FSMStates ADR006SingleReadModelArchitecture -.-> ADR005CodecBasedMarkdownRendering ADR003SourceFirstPatternArchitecture -.-> ADR001TaxonomyCanonicalValues ValidatorReadModelConsolidation -.-> ADR006SingleReadModelArchitecture @@ -431,8 +437,10 @@ All components with architecture annotations: | 🚧 Process State API | api | service | application | src/api/process-state.ts | | βœ… Scope Validator Impl | api | service | application | src/api/scope-validator.ts | | βœ… CLI Schema | cli | - | domain | src/cli/cli-schema.ts | +| 🚧 Dataset Cache | cli | infrastructure | infrastructure | src/cli/dataset-cache.ts | | 🚧 Output Pipeline Impl | cli | service | application | 
src/cli/output-pipeline.ts | | 🚧 Process API CLI Impl | cli | service | application | src/cli/process-api.ts | +| 🚧 Repl Mode | cli | service | application | src/cli/repl.ts | | βœ… Configuration Defaults | config | - | domain | src/config/defaults.ts | | βœ… Configuration Presets | config | - | domain | src/config/presets.ts | | βœ… Configuration Types | config | - | domain | src/config/types.ts | diff --git a/docs-live/BUSINESS-RULES.md b/docs-live/BUSINESS-RULES.md index 2a212aca..aff4128d 100644 --- a/docs-live/BUSINESS-RULES.md +++ b/docs-live/BUSINESS-RULES.md @@ -5,7 +5,7 @@ --- -**Domain constraints and invariants extracted from feature specifications. 592 rules from 126 features across 7 product areas.** +**Domain constraints and invariants extracted from feature specifications. 598 rules from 131 features across 7 product areas.** --- @@ -16,7 +16,7 @@ | [Annotation](business-rules/annotation.md) | 20 | 88 | 88 | | [Configuration](business-rules/configuration.md) | 7 | 32 | 32 | | [Core Types](business-rules/core-types.md) | 5 | 22 | 22 | -| [Data API](business-rules/data-api.md) | 21 | 89 | 89 | +| [Data API](business-rules/data-api.md) | 26 | 95 | 95 | | [Generation](business-rules/generation.md) | 60 | 300 | 300 | | [Process](business-rules/process.md) | 2 | 7 | 7 | | [Validation](business-rules/validation.md) | 11 | 54 | 54 | diff --git a/docs-live/CHANGELOG-GENERATED.md b/docs-live/CHANGELOG-GENERATED.md index 18502240..f698f57a 100644 --- a/docs-live/CHANGELOG-GENERATED.md +++ b/docs-live/CHANGELOG-GENERATED.md @@ -15,6 +15,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ### Added - **Deliverable Status Taxonomy**: Canonical status values for deliverables in Gherkin Background tables. +- **Repl Mode**: Loads the pipeline once and accepts multiple queries on stdin. +- **Process API CLI Impl**: Exposes ProcessStateAPI methods as CLI subcommands with JSON output. 
+- **Output Pipeline Impl**: Post-processing pipeline that transforms raw API results into shaped CLI output. +- **Lint Process CLI**: Validates git changes against delivery process rules. +- **Dataset Cache**: Caches the full PipelineResult (MasterDataset + ValidationSummary + warnings) to a JSON file. - **Config Resolver**: Resolves a raw `DeliveryProcessProjectConfig` into a fully-resolved `ResolvedConfig` with all defaults applied, stubs... - **Project Config Types**: Unified project configuration for the delivery-process package. - **Project Config Schema**: Zod validation schema for `DeliveryProcessProjectConfig`. @@ -32,9 +37,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - **Context Formatter Impl**: First plain-text formatter in the codebase. - **Context Assembler Impl**: Pure function composition over MasterDataset. - **Arch Queries Impl**: Pure functions over MasterDataset for deep architecture exploration. -- **Process API CLI Impl**: Exposes ProcessStateAPI methods as CLI subcommands with JSON output. -- **Output Pipeline Impl**: Post-processing pipeline that transforms raw API results into shaped CLI output. -- **Lint Process CLI**: Validates git changes against delivery process rules. - **FSM Validator**: :PDR005MvpWorkflow Pure validation functions following the Decider pattern: - No I/O, no side effects - Return... - **FSM Transitions**: :PDR005MvpWorkflow Defines valid transitions between FSM states per PDR-005: ``` roadmap ──→ active ──→ completed β”‚ ... - **FSM States**: :PDR005MvpWorkflow Defines the 4-state FSM from PDR-005 MVP Workflow: - roadmap: Planned work (fully editable) -... @@ -43,9 +45,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - **Design Review Codec**: :Generation Transforms MasterDataset into a RenderableDocument containing design review artifacts: sequence diagrams,... 
- **Composite Codec**: :Generation Assembles reference documents from multiple codec outputs by concatenating RenderableDocument sections. - **Claude Module Codec**: :Generation Transforms MasterDataset into RenderableDocuments for CLAUDE.md module generation. +- **Sequence Transform Utils**: :Generation Builds pre-computed SequenceIndexEntry objects from patterns that have sequence diagram annotations. - **Reference Generator Registration**: Registers all reference document generators. - **Design Review Generator**: :Generation Generates design review documents for patterns with sequence annotations. -- **Sequence Transform Utils**: :Generation Builds pre-computed SequenceIndexEntry objects from patterns that have sequence diagram annotations. - **Process Guard Types**: :FSMValidator Defines types for the process guard linter including: - Process state derived from file annotations -... - **Process Guard Module**: :FSMValidator,DeriveProcessState,DetectChanges,ProcessGuardDecider Enforces delivery process rules by validating... - **Detect Changes**: Detects changes from git diff including: - Modified, added, deleted files - Status transitions (@libar-docs-status... @@ -56,15 +58,20 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - **Design Review Generation Tests**: Tests the full design review generation pipeline: sequence annotations are extracted from patterns with business... - **Design Review Generator Lifecycle Tests**: The design review generator cleans up stale markdown files when annotated patterns are renamed or removed from the... - **Architecture Doc Refactoring Testing**: Validates that ARCHITECTURE.md retains its full reference content and that generated documents in docs-live/ coexist... +- **Process Api Cli Repl**: Interactive REPL mode keeps the pipeline loaded for multi-query sessions and supports reload. 
+- **Process Api Cli Metadata**: Response metadata includes validation summary and pipeline timing for diagnostics. +- **Process Api Cli Help**: Per-subcommand help displays usage, flags, and examples for individual subcommands. +- **Process Api Cli Dry Run**: Dry-run mode shows pipeline scope without processing data. +- **Process Api Cli Cache**: MasterDataset caching between CLI invocations: cache hits, mtime invalidation, and --no-cache bypass. +- **Stub Taxonomy Tag Tests**: Stub metadata (target path, design session) was stored as plain text in JSDoc descriptions, invisible to structured... +- **Stub Resolver Tests**: Design session stubs need structured discovery and resolution to determine which stubs have been implemented and... - **Context Formatter Tests**: Tests for formatContextBundle(), formatDepTree(), formatFileReadingList(), and formatOverview() plain text rendering... - **Context Assembler Tests**: Tests for assembleContext(), buildDepTree(), buildFileReadingList(), and buildOverview() pure functions that operate... -- **Arch Queries Test** - **Pattern Summarize Tests**: Validates that summarizePattern() projects ExtractedPattern (~3.5KB) to PatternSummary (~100 bytes) with the correct... - **Pattern Helpers Tests** - **Output Pipeline Tests**: Validates the output pipeline transforms: summarization, modifiers, list filters, empty stripping, and format output. - **Fuzzy Match Tests**: Validates tiered fuzzy matching: exact > prefix > substring > Levenshtein. -- **Stub Taxonomy Tag Tests**: Stub metadata (target path, design session) was stored as plain text in JSDoc descriptions, invisible to structured... -- **Stub Resolver Tests**: Design session stubs need structured discovery and resolution to determine which stubs have been implemented and... +- **Arch Queries Test** - **Uses Tag Testing**: Tests extraction and processing of @libar-docs-uses and @libar-docs-used-by relationship tags from TypeScript files. 
- **Depends On Tag Testing**: Tests extraction of @libar-docs-depends-on and @libar-docs-enables relationship tags from Gherkin files. @@ -88,6 +95,20 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ### Added - **Public API**: Main entry point for the @libar-dev/delivery-process package. +- **DoD Validation Types**: Types and schemas for Definition of Done (DoD) validation and anti-pattern detection. +- **Validation Module**: Barrel export for validation module providing: - Definition of Done (DoD) validation for completed phases -... +- **DoD Validator**: Validates that completed phases meet Definition of Done criteria: 1. +- **Anti Pattern Detector**: Detects violations of the dual-source documentation architecture and process hygiene issues that lead to... +- **String Utilities**: Provides shared utilities for string manipulation used across the delivery-process package, including slugification... +- **Utils Module**: Common helper functions used across the delivery-process package. +- **Pattern Id Generator**: Generates unique, deterministic pattern IDs based on file path and line number. +- **Collection Utilities**: Provides shared utilities for working with arrays and collections, such as grouping items by a key function. +- **Result Monad Types**: Explicit error handling via discriminated union. +- **Error Factory Types**: Structured, discriminated error types with factory functions. +- **Pattern Scanner**: Discovers TypeScript files matching glob patterns and filters to only those with `@libar-docs` opt-in. +- **Gherkin Scanner**: Scans .feature files for pattern metadata encoded in Gherkin tags. +- **Gherkin AST Parser**: Parses Gherkin feature files using @cucumber/gherkin and extracts structured data including feature metadata, tags,... +- **TypeScript AST Parser**: Parses TypeScript source files using @typescript-eslint/typescript-estree to extract @libar-docs-\* directives with... 
- **Workflow Config Schema**: Zod schemas for validating workflow configuration files that define status models, phase definitions, and artifact... - **Tag Registry Configuration**: Defines the structure and validation for tag taxonomy configuration. - **Output Schemas**: Zod schemas for JSON output formats used by CLI tools. @@ -97,12 +118,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - **Dual Source Schemas**: Zod schemas for dual-source extraction types. - **Doc Directive Schema**: Zod schemas for validating parsed @libar-docs-\* directives from JSDoc comments. - **Codec Utils**: Provides factory functions for creating type-safe JSON parsing and serialization pipelines using Zod schemas. -- **Result Monad Types**: Explicit error handling via discriminated union. -- **Error Factory Types**: Structured, discriminated error types with factory functions. -- **DoD Validation Types**: Types and schemas for Definition of Done (DoD) validation and anti-pattern detection. -- **Validation Module**: Barrel export for validation module providing: - Definition of Done (DoD) validation for completed phases -... -- **DoD Validator**: Validates that completed phases meet Definition of Done criteria: 1. -- **Anti Pattern Detector**: Detects violations of the dual-source documentation architecture and process hygiene issues that lead to... +- **Renderable Utils**: Utility functions for document codecs. +- **Renderable Document**: Universal intermediate format for all generated documentation. +- **Universal Renderer**: Converts RenderableDocument to output strings. +- **Renderable Document Model(RDM)**: Unified document generation using codecs and a universal renderer. +- **Document Generator**: Simplified document generation using codecs. - **Status Values**: THE single source of truth for FSM state values in the monorepo (per PDR-005 FSM). - **Risk Levels**: Three-tier risk classification for roadmap planning. 
- **Tag Registry Builder**: Constructs a complete TagRegistry from TypeScript constants. @@ -111,19 +131,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - **Hierarchy Levels**: Three-level hierarchy for organizing work: - epic: Multi-quarter strategic initiatives - phase: Standard work units... - **Format Types**: Defines how tag values are parsed and validated. - **Category Definitions**: Categories are used to classify patterns and organize documentation. -- **String Utilities**: Provides shared utilities for string manipulation used across the delivery-process package, including slugification... -- **Utils Module**: Common helper functions used across the delivery-process package. -- **Pattern Id Generator**: Generates unique, deterministic pattern IDs based on file path and line number. -- **Collection Utilities**: Provides shared utilities for working with arrays and collections, such as grouping items by a key function. -- **Renderable Utils**: Utility functions for document codecs. -- **Renderable Document**: Universal intermediate format for all generated documentation. -- **Universal Renderer**: Converts RenderableDocument to output strings. -- **Renderable Document Model(RDM)**: Unified document generation using codecs and a universal renderer. -- **Document Generator**: Simplified document generation using codecs. -- **Pattern Scanner**: Discovers TypeScript files matching glob patterns and filters to only those with `@libar-docs` opt-in. -- **Gherkin Scanner**: Scans .feature files for pattern metadata encoded in Gherkin tags. -- **Gherkin AST Parser**: Parses Gherkin feature files using @cucumber/gherkin and extracts structured data including feature metadata, tags,... -- **TypeScript AST Parser**: Parses TypeScript source files using @typescript-eslint/typescript-estree to extract @libar-docs-\* directives with... 
+- **Shape Extractor**: Extracts TypeScript type definitions (interfaces, type aliases, enums, function signatures) from source files for... +- **Layer Inference**: Infers feature file layer (timeline, domain, integration, e2e, component) from directory path patterns. +- **Gherkin Extractor**: Transforms scanned Gherkin feature files into ExtractedPattern objects for inclusion in generated documentation. +- **Dual Source Extractor**: Extracts pattern metadata from both TypeScript code stubs (@libar-docs-_) and Gherkin feature files (@libar-docs-_),... +- **Document Extractor**: Converts scanned file data into complete ExtractedPattern objects with unique IDs, inferred names, categories, and... - **Warning Collector**: Provides a unified system for capturing, categorizing, and reporting non-fatal issues during document generation. - **Generator Types**: Minimal interface for pluggable generators that produce documentation from patterns. - **Source Mapping Validator**: Performs pre-flight checks on source mapping tables before extraction begins. @@ -132,6 +144,15 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - **Documentation Generation Orchestrator**: Invariant: The orchestrator is the integration boundary for full docs generation: it delegates dataset construction... - **Content Deduplicator**: Identifies and merges duplicate sections extracted from multiple sources. - **Codec Based Generator**: Adapts the new RenderableDocument Model (RDM) codec system to the existing DocumentGenerator interface. +- **CLI Version Helper**: Reads package version from package.json for CLI --version flag. +- **Validate Patterns CLI**: Cross-validates TypeScript patterns vs Gherkin feature files. +- **Lint Patterns CLI**: Validates pattern annotations for quality and completeness. +- **Documentation Generator CLI**: Replaces multiple specialized CLIs with one unified interface that supports multiple generators in a single run. 
+- **CLI Error Handler**: Provides type-safe error handling for all CLI commands using the DocError discriminated union pattern. +- **CLI Schema**: :DataAPI Declarative schema defining all CLI options for the process-api command. +- **Lint Rules**: Defines lint rules that check @libar-docs-\* directives for completeness and quality. +- **Lint Module**: Provides lint rules and engine for pattern annotation quality checking. +- **Lint Engine**: Orchestrates lint rule execution against parsed directives. - **Workflow Loader**: Provides the default 6-phase workflow as an inline constant and loads custom workflow overrides from JSON files via... - **Configuration Types**: Type definitions for the delivery process configuration system. - **Regex Builders**: Type-safe regex factory functions for tag detection and normalization. @@ -139,23 +160,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - **Delivery Process Factory**: Main factory function for creating configured delivery process instances. - **Configuration Defaults**: Centralized default constants for the delivery-process package. - **Config Loader**: Discovers and loads `delivery-process.config.ts` files for hierarchical configuration. -- **Shape Extractor**: Extracts TypeScript type definitions (interfaces, type aliases, enums, function signatures) from source files for... -- **Layer Inference**: Infers feature file layer (timeline, domain, integration, e2e, component) from directory path patterns. -- **Gherkin Extractor**: Transforms scanned Gherkin feature files into ExtractedPattern objects for inclusion in generated documentation. -- **Dual Source Extractor**: Extracts pattern metadata from both TypeScript code stubs (@libar-docs-_) and Gherkin feature files (@libar-docs-_),... -- **Document Extractor**: Converts scanned file data into complete ExtractedPattern objects with unique IDs, inferred names, categories, and... 
-- **Lint Rules**: Defines lint rules that check @libar-docs-\* directives for completeness and quality. -- **Lint Module**: Provides lint rules and engine for pattern annotation quality checking. -- **Lint Engine**: Orchestrates lint rule execution against parsed directives. - **Scope Validator Impl**: Pure function composition over ProcessStateAPI and MasterDataset. - **Rules Query Module**: Pure query function for business rules extracted from Gherkin Rule: blocks. - **Handoff Generator Impl**: Pure function that assembles a handoff document from ProcessStateAPI and MasterDataset. -- **CLI Version Helper**: Reads package version from package.json for CLI --version flag. -- **Validate Patterns CLI**: Cross-validates TypeScript patterns vs Gherkin feature files. -- **Lint Patterns CLI**: Validates pattern annotations for quality and completeness. -- **Documentation Generator CLI**: Replaces multiple specialized CLIs with one unified interface that supports multiple generators in a single run. -- **CLI Error Handler**: Provides type-safe error handling for all CLI commands using the DocError discriminated union pattern. -- **CLI Schema**: :DataAPI Declarative schema defining all CLI options for the process-api command. - **Index Preamble Configuration β€” DD-3, DD-4 Decisions**: Decision DD-3 (Audience paths: preamble vs annotation-derived): Use full preamble for audience reading paths. - **IndexCodec Factory β€” DD-1 Implementation Stub**: Creates the IndexCodec as a Zod codec (MasterDataset -> RenderableDocument). - **IndexCodecOptions β€” DD-1, DD-5 Decisions**: Decision DD-1 (New IndexCodec vs extend existing): Create a new IndexCodec registered in CodecRegistry, NOT a... @@ -177,14 +184,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - **Business Rules Codec**: :Generation Transforms MasterDataset into a RenderableDocument for business rules output. 
- **Architecture Codec**: :Generation Transforms MasterDataset into a RenderableDocument containing architecture diagrams (Mermaid) generated... - **Adr Document Codec**: :Generation Transforms MasterDataset into RenderableDocument for Architecture Decision Records. -- **Process Api Reference Generator**: :Generation Generates `PROCESS-API-REFERENCE.md` from the declarative CLI schema. -- **Built In Generators**: Registers all codec-based generators on import using the RDM (RenderableDocument Model) architecture. -- **Decision Doc Generator**: Orchestrates the full pipeline for generating documentation from decision documents (ADR/PDR in .feature format): 1. -- **Codec Generator Registration**: Registers codec-based generators for the RenderableDocument Model (RDM) system. - **Transform Dataset**: Transforms raw extracted patterns into a MasterDataset with all pre-computed views. - **Merge Patterns**: Merges patterns from TypeScript and Gherkin sources with conflict detection. - **Pipeline Module**: Barrel export for the unified transformation pipeline components. - **Pipeline Factory**: Invariant: `buildMasterDataset()` is the shared factory for Steps 1-8 of the architecture pipeline and returns... +- **Process Api Reference Generator**: :Generation Generates `PROCESS-API-REFERENCE.md` from the declarative CLI schema. +- **Built In Generators**: Registers all codec-based generators on import using the RDM (RenderableDocument Model) architecture. +- **Decision Doc Generator**: Orchestrates the full pipeline for generating documentation from decision documents (ADR/PDR in .feature format): 1. +- **Codec Generator Registration**: Registers codec-based generators for the RenderableDocument Model (RDM) system. - **Codec Base Options**: Shared types, interfaces, and utilities for all document codecs. - **ADR 006 Single Read Model Architecture**: The delivery-process package applies event sourcing to itself: git is the event store, annotated source files are... 
- **ADR 005 Codec Based Markdown Rendering**: The documentation generator needs to transform structured pattern data (MasterDataset) into markdown files. @@ -218,6 +225,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - **Data API Platform Integration**: The process-api CLI requires subprocess invocation for every query, adding shell overhead and preventing stateful... - **Data API Output Shaping**: The ProcessStateAPI CLI returns raw `ExtractedPattern` objects via `JSON.stringify`. - **Data API Context Assembly**: Starting a Claude Code design or implementation session requires assembling 30-100KB of curated, multi-source context... +- **Data API CLI Ergonomics**: The process-api CLI runs the full pipeline (scan, extract, transform) on every invocation, taking 2-5 seconds. - **Data API Architecture Queries**: The current `arch` subcommand provides basic queries (roles, context, layer, graph) but lacks deeper analysis needed... - **Cross Cutting Document Inclusion**: The reference doc codec assembles content from four sources, each with its own selection mechanism: conventionTags... - **Config Based Workflow Definition**: Every `pnpm process:query` and `pnpm docs:*` invocation prints: `Failed to load default workflow (6-phase-standard):... @@ -227,7 +235,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - **Architecture Doc Refactoring**: ARCHITECTURE.md is 1,287 lines of manually-maintained documentation covering 14 sections. - **Architecture Diagram Core**: Problem: Architecture documentation requires manually maintaining mermaid diagrams that duplicate information already... - **Architecture Diagram Advanced**: Problem: Core diagram generation (see ArchitectureDiagramCore) produces component-level diagrams from `arch-*` tags. -- **String Utils**: String utilities provide consistent text transformations across the codebase. 
- **Status Transition Detection Testing**: Tests for the detectStatusTransitions function that parses git diff output. - **Process Guard Testing**: Pure validation functions for enforcing delivery process rules per PDR-005. - **FSM Validator Testing**: Pure validation functions for the 4-state FSM defined in PDR-005. @@ -235,14 +242,15 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - **Detect Changes Testing**: Tests for the detectDeliverableChanges function that parses git diff output. - **Config Schema Validation**: Configuration schemas validate scanner and generator inputs with security constraints to prevent path traversal... - **Anti Pattern Detector Testing**: Detects violations of the dual-source documentation architecture and process hygiene issues that lead to... +- **Result Monad**: The Result type provides explicit error handling via a discriminated union. +- **Error Factories**: Error factories create structured, discriminated error types with consistent message formatting. +- **String Utils**: String utilities provide consistent text transformations across the codebase. - **Gherkin Ast Parser**: The Gherkin AST parser extracts feature metadata, scenarios, and steps from .feature files for timeline generation... - **File Discovery**: The file discovery system uses glob patterns to find TypeScript files for documentation extraction. - **Doc String Media Type**: DocString language hints (mediaType) should be preserved through the parsing pipeline from feature files to rendered... - **Ast Parser Relationships Edges**: The AST Parser extracts @libar-docs-\* directives from TypeScript source files using the TypeScript compiler API. - **Ast Parser Metadata**: The AST Parser extracts @libar-docs-\* directives from TypeScript source files using the TypeScript compiler API. - **Ast Parser Exports**: The AST Parser extracts @libar-docs-\* directives from TypeScript source files using the TypeScript compiler API. 
-- **Result Monad**: The Result type provides explicit error handling via a discriminated union. -- **Error Factories**: Error factories create structured, discriminated error types with consistent message formatting. - **Rule Keyword Po C**: This feature tests whether vitest-cucumber supports the Rule keyword for organizing scenarios under business rules. - **Lint Rule Individual Testing**: Individual lint rules that check parsed directives for completeness. - **Lint Rule Advanced Testing**: Complex lint rule logic and collection-level behavior. @@ -309,6 +317,16 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - **Zod Codec Migration**: All JSON parsing and serialization uses type-safe Zod codec pattern, replacing raw JSON.parse/stringify with... - **Scope Validator Tests**: Starting an implementation or design session without checking prerequisites wastes time when blockers are discovered... - **Handoff Generator Tests**: Multi-session work loses critical state between sessions when handoff documentation is manual or forgotten. +- **Mermaid Relationship Rendering**: Tests for rendering all relationship types in Mermaid dependency graphs with distinct visual styles per relationship... +- **Linter Validation Testing**: Tests for lint rules that validate relationship integrity, detect conflicts, and ensure bidirectional traceability... +- **Implements Tag Processing**: Tests for the @libar-docs-implements tag which links implementation files to their corresponding roadmap pattern... +- **Extends Tag Testing**: Tests for the @libar-docs-extends tag which establishes generalization relationships between patterns (pattern... +- **Process Api Reference Tests**: Verifies that the declarative CLI schema drives reference table generation and stays in sync with the parser... +- **Layered Diagram Generation**: As a documentation generator I want to generate layered architecture diagrams from metadata So that system... 
+- **Arch Generator Registration**: As a CLI user I want an architecture generator registered in the generator registry So that I can run pnpm... +- **Component Diagram Generation**: As a documentation generator I want to generate component diagrams from architecture metadata So that system... +- **Arch Tag Extraction**: As a documentation generator I want architecture tags extracted from source code So that I can generate accurate... +- **Arch Index Dataset**: As a documentation generator I want an archIndex built during dataset transformation So that I can efficiently look... - **Timeline Codec Testing**: The timeline codecs (RoadmapDocumentCodec, CompletedMilestonesCodec, CurrentWorkCodec) transform MasterDataset into... - **Shape Selector Testing**: Tests the filterShapesBySelectors function that provides fine-grained shape selection via structural discriminated... - **Shape Matcher Testing**: Matches file paths against glob patterns for TypeScript shape extraction. @@ -327,15 +345,5 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - **Dedent Helper**: The dedent helper function normalizes indentation in code blocks extracted from DocStrings. - **Convention Extractor Testing**: Extracts convention content from MasterDataset decision records tagged with @libar-docs-convention. - **Composite Codec Testing**: Assembles reference documents from multiple codec outputs by concatenating RenderableDocument sections. -- **Process Api Reference Tests**: Verifies that the declarative CLI schema drives reference table generation and stays in sync with the parser... -- **Layered Diagram Generation**: As a documentation generator I want to generate layered architecture diagrams from metadata So that system... -- **Arch Generator Registration**: As a CLI user I want an architecture generator registered in the generator registry So that I can run pnpm... 
-- **Component Diagram Generation**: As a documentation generator I want to generate component diagrams from architecture metadata So that system... -- **Arch Tag Extraction**: As a documentation generator I want architecture tags extracted from source code So that I can generate accurate... -- **Arch Index Dataset**: As a documentation generator I want an archIndex built during dataset transformation So that I can efficiently look... -- **Mermaid Relationship Rendering**: Tests for rendering all relationship types in Mermaid dependency graphs with distinct visual styles per relationship... -- **Linter Validation Testing**: Tests for lint rules that validate relationship integrity, detect conflicts, and ensure bidirectional traceability... -- **Implements Tag Processing**: Tests for the @libar-docs-implements tag which links implementation files to their corresponding roadmap pattern... -- **Extends Tag Testing**: Tests for the @libar-docs-extends tag which establishes generalization relationships between patterns (pattern... 
--- diff --git a/docs-live/INDEX.md b/docs-live/INDEX.md index 7b68426e..70f0c34e 100644 --- a/docs-live/INDEX.md +++ b/docs-live/INDEX.md @@ -10,7 +10,7 @@ | ----------------- | ----------------------------------------------------- | | **Package** | @libar-dev/delivery-process | | **Purpose** | Code-first documentation and delivery process toolkit | -| **Patterns** | 364 tracked (256 completed, 53 active, 55 planned) | +| **Patterns** | 371 tracked (257 completed, 60 active, 54 planned) | | **Product Areas** | 7 | | **License** | MIT | @@ -149,23 +149,23 @@ | Annotation | 26 | 23 | 2 | 1 | [β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‘] 23/26 88% | | Configuration | 11 | 8 | 0 | 3 | [β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‘β–‘] 8/11 73% | | CoreTypes | 7 | 7 | 0 | 0 | [β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ] 7/7 100% | -| DataAPI | 36 | 23 | 9 | 4 | [β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‘β–‘β–‘] 23/36 64% | +| DataAPI | 41 | 24 | 14 | 3 | [β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‘β–‘β–‘] 24/41 59% | | Generation | 94 | 81 | 5 | 8 | [β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‘] 81/94 86% | | Process | 11 | 4 | 0 | 7 | [β–ˆβ–ˆβ–ˆβ–‘β–‘β–‘β–‘β–‘] 4/11 36% | | Validation | 22 | 16 | 0 | 6 | [β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‘β–‘] 16/22 73% | -| **Total** | **207** | **162** | **16** | **29** | **[β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‘β–‘] 162/207 78%** | +| **Total** | **212** | **163** | **21** | **28** | **[β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‘β–‘] 163/212 77%** | --- ## Phase Progress -**364** patterns total: **256** completed (70%), **53** active, **55** planned. [β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‘β–‘β–‘β–‘β–‘β–‘] 256/364 +**371** patterns total: **257** completed (69%), **60** active, **54** planned. 
[β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‘β–‘β–‘β–‘β–‘β–‘] 257/371 | Status | Count | Percentage | | --------- | ----- | ---------- | -| Completed | 256 | 70% | -| Active | 53 | 15% | -| Planned | 55 | 15% | +| Completed | 257 | 69% | +| Active | 60 | 16% | +| Planned | 54 | 15% | ### By Phase @@ -174,7 +174,7 @@ | Phase 18 | 1 | 0 | 0% | | Phase 23 | 2 | 2 | 100% | | Phase 24 | 2 | 2 | 100% | -| Phase 25 | 10 | 7 | 70% | +| Phase 25 | 10 | 8 | 80% | | Phase 26 | 2 | 2 | 100% | | Phase 27 | 3 | 3 | 100% | | Phase 28 | 2 | 2 | 100% | diff --git a/docs-live/PRODUCT-AREAS.md b/docs-live/PRODUCT-AREAS.md index 9957302a..a8cbd8b0 100644 --- a/docs-live/PRODUCT-AREAS.md +++ b/docs-live/PRODUCT-AREAS.md @@ -51,7 +51,7 @@ Validation is the enforcement boundary β€” it ensures that every change to annot The Data API provides direct terminal access to delivery process state. It replaces reading generated markdown or launching explore agents β€” targeted queries use 5-10x less context. The `context` command assembles curated bundles tailored to session type (planning, design, implement). 
-**36 patterns** β€” 23 completed, 9 active, 4 planned +**41 patterns** β€” 24 completed, 14 active, 3 planned **Key patterns:** DataAPIContextAssembly, ProcessStateAPICLI, DataAPIDesignSessionSupport, DataAPIRelationshipGraph, DataAPIOutputShaping @@ -85,10 +85,10 @@ Process defines the USDP-inspired session workflow that governs how work moves t | [Configuration](product-areas/CONFIGURATION.md) | 11 | 8 | 0 | 3 | | [Generation](product-areas/GENERATION.md) | 94 | 81 | 5 | 8 | | [Validation](product-areas/VALIDATION.md) | 22 | 16 | 0 | 6 | -| [DataAPI](product-areas/DATA-API.md) | 36 | 23 | 9 | 4 | +| [DataAPI](product-areas/DATA-API.md) | 41 | 24 | 14 | 3 | | [CoreTypes](product-areas/CORE-TYPES.md) | 7 | 7 | 0 | 0 | | [Process](product-areas/PROCESS.md) | 11 | 4 | 0 | 7 | -| **Total** | **207** | **162** | **16** | **29** | +| **Total** | **212** | **163** | **21** | **28** | --- @@ -120,9 +120,9 @@ C4Context System(DataAPIContextAssembly, "DataAPIContextAssembly") System(CrossCuttingDocumentInclusion, "CrossCuttingDocumentInclusion") System(CodecDrivenReferenceGeneration, "CodecDrivenReferenceGeneration") + System(StringUtils, "StringUtils") System(ResultMonad, "ResultMonad") System(ErrorFactories, "ErrorFactories") - System(StringUtils, "StringUtils") System(ExtractionPipelineEnhancementsTesting, "ExtractionPipelineEnhancementsTesting") System(KebabCaseSlugs, "KebabCaseSlugs") System(ErrorHandlingUnification, "ErrorHandlingUnification") @@ -199,9 +199,9 @@ graph LR DataAPIContextAssembly["DataAPIContextAssembly"] CrossCuttingDocumentInclusion["CrossCuttingDocumentInclusion"] CodecDrivenReferenceGeneration["CodecDrivenReferenceGeneration"] + StringUtils["StringUtils"] ResultMonad["ResultMonad"] ErrorFactories["ErrorFactories"] - StringUtils["StringUtils"] ExtractionPipelineEnhancementsTesting["ExtractionPipelineEnhancementsTesting"] KebabCaseSlugs["KebabCaseSlugs"] ErrorHandlingUnification["ErrorHandlingUnification"] diff --git 
a/docs-live/_claude-md/core-types/core-types-overview.md b/docs-live/_claude-md/core-types/core-types-overview.md index 8d1ccd04..fc647af8 100644 --- a/docs-live/_claude-md/core-types/core-types-overview.md +++ b/docs-live/_claude-md/core-types/core-types-overview.md @@ -9,7 +9,7 @@ - Branded nominal types: `Branded` creates compile-time distinct types from structural TypeScript. Prevents mixing `PatternId` with `CategoryName` even though both are `string` at runtime - String transformation consistency: `slugify` produces URL-safe identifiers, `camelCaseToTitleCase` preserves acronyms (e.g., "APIEndpoint" becomes "API Endpoint"), `toKebabCase` handles consecutive uppercase correctly -**Components:** Other (ResultMonad, ErrorFactories, StringUtils, KebabCaseSlugs, ErrorHandlingUnification) +**Components:** Other (StringUtils, ResultMonad, ErrorFactories, KebabCaseSlugs, ErrorHandlingUnification) #### API Types diff --git a/docs-live/business-rules/data-api.md b/docs-live/business-rules/data-api.md index 9922ff1e..90dd24f7 100644 --- a/docs-live/business-rules/data-api.md +++ b/docs-live/business-rules/data-api.md @@ -4,7 +4,7 @@ --- -**89 rules** from 21 features. 89 rules have explicit invariants. +**95 rules** from 26 features. 95 rules have explicit invariants. --- @@ -754,6 +754,26 @@ _Validates that summarizePattern() projects ExtractedPattern (~3.5KB) to_ _summarize.feature_ +### Process Api Cli Cache + +_MasterDataset caching between CLI invocations: cache hits, mtime invalidation, and --no-cache bypass._ + +--- + +#### MasterDataset is cached between invocations + +> **Invariant:** When source files have not changed between CLI invocations, the second invocation must use the cached MasterDataset and report cache.hit as true with reduced pipelineMs. +> +> **Rationale:** The pipeline rebuild costs 2-5 seconds per invocation. Caching eliminates this cost for repeated queries against unchanged sources, which is the common case during interactive AI sessions. 
+ +**Verified by:** + +- Second query uses cached dataset +- Cache invalidated on source file change +- No-cache flag bypasses cache + +_data-api-cache.feature_ + ### Process Api Cli Core _Core CLI infrastructure: help, version, input validation, status, query, pattern, arch basics, missing args, edge cases._ @@ -867,6 +887,63 @@ _Core CLI infrastructure: help, version, input validation, status, query, patter _process-api-core.feature_ +### Process Api Cli Dry Run + +_Dry-run mode shows pipeline scope without processing data._ + +--- + +#### Dry-run shows pipeline scope without processing + +> **Invariant:** The --dry-run flag must display file counts, config status, and cache status without executing the pipeline. Output must contain the DRY RUN marker and must not contain a JSON success envelope. +> +> **Rationale:** Dry-run enables users to verify their input patterns resolve to expected files before committing to the 2-5s pipeline cost, which is especially valuable when debugging glob patterns or config auto-detection. + +**Verified by:** + +- Dry-run shows file counts + +_data-api-dryrun.feature_ + +### Process Api Cli Help + +_Per-subcommand help displays usage, flags, and examples for individual subcommands._ + +--- + +#### Per-subcommand help shows usage and flags + +> **Invariant:** Running any subcommand with --help must display usage information specific to that subcommand, including applicable flags and examples. Unknown subcommands must fall back to a descriptive message. +> +> **Rationale:** Per-subcommand help replaces the need to scroll through full --help output and provides contextual guidance for subcommand-specific flags like --session. 
+ +**Verified by:** + +- Per-subcommand help for context +- Global help still works +- Unknown subcommand help + +_data-api-help.feature_ + +### Process Api Cli Metadata + +_Response metadata includes validation summary and pipeline timing for diagnostics._ + +--- + +#### Response metadata includes validation summary + +> **Invariant:** Every JSON response envelope must include a metadata.validation object with danglingReferenceCount, malformedPatternCount, unknownStatusCount, and warningCount fields, plus a numeric pipelineMs timing. +> +> **Rationale:** Consumers use validation counts to detect annotation quality degradation without running a separate validation pass. Pipeline timing enables performance regression detection in CI. + +**Verified by:** + +- Validation summary in response metadata +- Pipeline timing in metadata + +_data-api-metadata.feature_ + ### Process Api Cli Modifiers And Rules _Output modifiers, arch health, and rules subcommand._ @@ -921,6 +998,37 @@ _Output modifiers, arch health, and rules subcommand._ _process-api-modifiers-rules.feature_ +### Process Api Cli Repl + +_Interactive REPL mode keeps the pipeline loaded for multi-query sessions and supports reload._ + +--- + +#### REPL mode accepts multiple queries on a single pipeline load + +> **Invariant:** REPL mode loads the pipeline once and accepts multiple queries on stdin, eliminating per-query pipeline overhead. +> +> **Rationale:** Design sessions involve 10-20 exploratory queries in sequence. REPL mode eliminates per-query pipeline overhead entirely. + +**Verified by:** + +- REPL accepts multiple queries +- REPL shows help output + +--- + +#### REPL reload rebuilds the pipeline from fresh sources + +> **Invariant:** The reload command rebuilds the pipeline from fresh sources and subsequent queries use the new dataset. +> +> **Rationale:** During implementation sessions, source files change frequently. Reload allows refreshing without restarting the REPL. 
+ +**Verified by:** + +- REPL reloads pipeline on command + +_data-api-repl.feature_ + ### Process Api Cli Subcommands _Discovery subcommands: list, search, context assembly, tags/sources, extended arch, unannotated._ diff --git a/docs-live/product-areas/CORE-TYPES.md b/docs-live/product-areas/CORE-TYPES.md index a251499a..7ac1d6cb 100644 --- a/docs-live/product-areas/CORE-TYPES.md +++ b/docs-live/product-areas/CORE-TYPES.md @@ -33,9 +33,9 @@ Scoped architecture diagram showing component relationships: ```mermaid C4Context title Core Type System + System(StringUtils, "StringUtils") System(ResultMonad, "ResultMonad") System(ErrorFactories, "ErrorFactories") - System(StringUtils, "StringUtils") System(KebabCaseSlugs, "KebabCaseSlugs") System(ErrorHandlingUnification, "ErrorHandlingUnification") Rel(KebabCaseSlugs, StringUtils, "depends on") @@ -51,9 +51,9 @@ Scoped architecture diagram showing component relationships: ```mermaid graph LR + StringUtils["StringUtils"] ResultMonad["ResultMonad"] ErrorFactories["ErrorFactories"] - StringUtils["StringUtils"] KebabCaseSlugs["KebabCaseSlugs"] ErrorHandlingUnification["ErrorHandlingUnification"] KebabCaseSlugs -.->|depends on| StringUtils diff --git a/docs-live/product-areas/DATA-API.md b/docs-live/product-areas/DATA-API.md index 85832611..29debd55 100644 --- a/docs-live/product-areas/DATA-API.md +++ b/docs-live/product-areas/DATA-API.md @@ -78,11 +78,14 @@ graph TB ArchQueriesImpl("ArchQueriesImpl") end subgraph cli["Cli"] + ReplMode("ReplMode") ProcessAPICLIImpl("ProcessAPICLIImpl") OutputPipelineImpl("OutputPipelineImpl") + DatasetCache[/"DatasetCache"/] CLISchema["CLISchema"] end subgraph related["Related"] + WorkflowConfigSchema["WorkflowConfigSchema"]:::neighbor Pattern_Scanner["Pattern Scanner"]:::neighbor StubResolverImpl["StubResolverImpl"]:::neighbor RulesQueryModule["RulesQueryModule"]:::neighbor @@ -94,8 +97,12 @@ graph TB DataAPIDesignSessionSupport["DataAPIDesignSessionSupport"]:::neighbor 
DataAPIOutputShaping["DataAPIOutputShaping"]:::neighbor DataAPIContextAssembly["DataAPIContextAssembly"]:::neighbor + DataAPICLIErgonomics["DataAPICLIErgonomics"]:::neighbor DataAPIArchitectureQueries["DataAPIArchitectureQueries"]:::neighbor end + ReplMode -->|uses| PipelineFactory + ReplMode -->|uses| ProcessStateAPI + ReplMode ..->|implements| DataAPICLIErgonomics ProcessAPICLIImpl -->|uses| ProcessStateAPI ProcessAPICLIImpl -->|uses| MasterDataset ProcessAPICLIImpl -->|uses| PipelineFactory @@ -106,6 +113,9 @@ graph TB ProcessAPICLIImpl ..->|implements| ProcessStateAPICLI OutputPipelineImpl -->|uses| PatternSummarizerImpl OutputPipelineImpl ..->|implements| DataAPIOutputShaping + DatasetCache -->|uses| PipelineFactory + DatasetCache -->|uses| WorkflowConfigSchema + DatasetCache ..->|implements| DataAPICLIErgonomics CLISchema ..->|implements| ProcessApiHybridGeneration PatternSummarizerImpl -->|uses| ProcessStateAPI PatternSummarizerImpl ..->|implements| DataAPIOutputShaping @@ -472,7 +482,7 @@ ArchIndexSchema = z.object({ ## Business Rules -34 patterns, 145 rules with invariants (145 total) +39 patterns, 151 rules with invariants (151 total) ### Arch Queries Test @@ -674,6 +684,12 @@ ArchIndexSchema = z.object({ | DD-6 - Both positional and flag forms for scope type | scope-validate must accept scope type as both a positional argument and a --type flag. | Supporting only one form creates inconsistency with CLI conventions and forces users to remember which form each subcommand uses. | | DD-7 - Co-located formatter functions | Each module must export both its data builder and text formatter as co-located functions. | Splitting builder and formatter across files increases coupling surface and makes it harder to trace data flow through the module. 
| +### Process Api Cli Cache + +| Rule | Invariant | Rationale | +| ------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| MasterDataset is cached between invocations | When source files have not changed between CLI invocations, the second invocation must use the cached MasterDataset and report cache.hit as true with reduced pipelineMs. | The pipeline rebuild costs 2-5 seconds per invocation. Caching eliminates this cost for repeated queries against unchanged sources, which is the common case during interactive AI sessions. | + ### Process Api Cli Core | Rule | Invariant | Rationale | @@ -687,6 +703,24 @@ ArchIndexSchema = z.object({ | CLI shows errors for missing subcommand arguments | Subcommands that require arguments must reject invocations with missing arguments and display usage guidance. | Silent acceptance of incomplete input would produce confusing pipeline errors instead of actionable feedback at the CLI boundary. | | CLI handles argument edge cases | The CLI must gracefully handle non-standard argument forms including numeric coercion and the `--` pnpm separator. | Real-world invocations via pnpm pass `--` separators and numeric strings; mishandling these causes silent data loss or crashes in automated workflows. 
| +### Process Api Cli Dry Run + +| Rule | Invariant | Rationale | +| ----------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Dry-run shows pipeline scope without processing | The --dry-run flag must display file counts, config status, and cache status without executing the pipeline. Output must contain the DRY RUN marker and must not contain a JSON success envelope. | Dry-run enables users to verify their input patterns resolve to expected files before committing to the 2-5s pipeline cost, which is especially valuable when debugging glob patterns or config auto-detection. | + +### Process Api Cli Help + +| Rule | Invariant | Rationale | +| ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Per-subcommand help shows usage and flags | Running any subcommand with --help must display usage information specific to that subcommand, including applicable flags and examples. Unknown subcommands must fall back to a descriptive message. | Per-subcommand help replaces the need to scroll through full --help output and provides contextual guidance for subcommand-specific flags like --session. 
| + +### Process Api Cli Metadata + +| Rule | Invariant | Rationale | +| --------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Response metadata includes validation summary | Every JSON response envelope must include a metadata.validation object with danglingReferenceCount, malformedPatternCount, unknownStatusCount, and warningCount fields, plus a numeric pipelineMs timing. | Consumers use validation counts to detect annotation quality degradation without running a separate validation pass. Pipeline timing enables performance regression detection in CI. | + ### Process Api Cli Modifiers And Rules | Rule | Invariant | Rationale | @@ -695,6 +729,13 @@ ArchIndexSchema = z.object({ | CLI arch health subcommands detect graph quality issues | Health subcommands (dangling, orphans, blocking) operate on the relationship index, not the architecture index, and return results without requiring arch annotations. | Graph quality issues (broken references, isolated patterns, blocked dependencies) are relationship-level concerns that should be queryable even when no architecture metadata exists. | | CLI rules subcommand queries business rules and invariants | The rules subcommand returns structured business rules extracted from Gherkin Rule: blocks, grouped by product area and phase, with parsed invariant and rationale annotations. | Live business rule queries replace static generated markdown, enabling on-demand filtering by product area, pattern, and invariant presence. 
| +### Process Api Cli Repl + +| Rule | Invariant | Rationale | +| ------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------- | +| REPL mode accepts multiple queries on a single pipeline load | REPL mode loads the pipeline once and accepts multiple queries on stdin, eliminating per-query pipeline overhead. | Design sessions involve 10-20 exploratory queries in sequence. REPL mode eliminates per-query pipeline overhead entirely. | +| REPL reload rebuilds the pipeline from fresh sources | The reload command rebuilds the pipeline from fresh sources and subsequent queries use the new dataset. | During implementation sessions, source files change frequently. Reload allows refreshing without restarting the REPL. | + ### Process Api Cli Subcommands | Rule | Invariant | Rationale | diff --git a/docs-live/product-areas/GENERATION.md b/docs-live/product-areas/GENERATION.md index 031b2172..025433ed 100644 --- a/docs-live/product-areas/GENERATION.md +++ b/docs-live/product-areas/GENERATION.md @@ -78,9 +78,9 @@ graph TB ArchitectureCodec[("ArchitectureCodec")] end subgraph related["Related"] + MasterDataset["MasterDataset"]:::neighbor Pattern_Scanner["Pattern Scanner"]:::neighbor GherkinASTParser["GherkinASTParser"]:::neighbor - MasterDataset["MasterDataset"]:::neighbor ShapeExtractor["ShapeExtractor"]:::neighbor ReferenceDocShowcase["ReferenceDocShowcase"]:::neighbor ProcessApiHybridGeneration["ProcessApiHybridGeneration"]:::neighbor diff --git a/src/api/types.ts b/src/api/types.ts index c4dde40c..1d60936d 100644 --- a/src/api/types.ts +++ b/src/api/types.ts @@ -24,6 +24,24 @@ import type { ImplementationRef } from '../validation-schemas/master-dataset.js' // Query Response Types // 
============================================================================= +/** + * Optional extended metadata for query responses. + * Populated when the pipeline runs (not for FSM short-circuit queries). + */ +export interface QueryMetadataExtra { + readonly validation?: { + readonly danglingReferenceCount: number; + readonly malformedPatternCount: number; + readonly unknownStatusCount: number; + readonly warningCount: number; + }; + readonly cache?: { + readonly hit: boolean; + readonly ageMs?: number; + }; + readonly pipelineMs?: number; +} + /** * Successful query response */ @@ -33,7 +51,7 @@ export interface QuerySuccess { metadata: { timestamp: string; patternCount: number; - }; + } & QueryMetadataExtra; } /** @@ -247,15 +265,20 @@ export interface NeighborEntry { // ============================================================================= /** - * Create a success response + * Create a success response with optional extended metadata. */ -export function createSuccess(data: T, patternCount: number): QuerySuccess { +export function createSuccess( + data: T, + patternCount: number, + extra?: QueryMetadataExtra +): QuerySuccess { return { success: true, data, metadata: { timestamp: new Date().toISOString(), patternCount, + ...extra, }, }; } diff --git a/src/cli/dataset-cache.ts b/src/cli/dataset-cache.ts new file mode 100644 index 00000000..52829848 --- /dev/null +++ b/src/cli/dataset-cache.ts @@ -0,0 +1,224 @@ +/** + * @libar-docs + * @libar-docs-cli + * @libar-docs-pattern DatasetCache + * @libar-docs-status active + * @libar-docs-implements DataAPICLIErgonomics + * @libar-docs-arch-role infrastructure + * @libar-docs-arch-context cli + * @libar-docs-arch-layer infrastructure + * @libar-docs-uses PipelineFactory, WorkflowConfigSchema + * + * ## Dataset Cache - MasterDataset Persistence with mtime Invalidation + * + * Caches the full PipelineResult (MasterDataset + ValidationSummary + warnings) + * to a JSON file. 
Subsequent CLI invocations skip the 2-5s pipeline rebuild + * when no source files have changed. + * + * ### Design Decisions + * + * - DD-1: Excludes LoadedWorkflow (contains Maps), reconstructs on load via createLoadedWorkflow() + * - DD-2: Cache at node_modules/.cache/delivery-process/dataset.json + * - DD-3: Cache key = sha256(sorted file mtimes + pipeline options hash) + * - DD-4: All errors produce cache miss (never throw) + */ + +import * as fs from 'fs'; +import * as fsp from 'fs/promises'; +import * as path from 'path'; +import * as crypto from 'crypto'; +import { glob } from 'glob'; +import type { PipelineResult, PipelineOptions } from '../generators/pipeline/index.js'; +import type { RuntimeMasterDataset } from '../generators/pipeline/index.js'; +import type { WorkflowConfig } from '../validation-schemas/workflow-config.js'; +import { createLoadedWorkflow } from '../validation-schemas/workflow-config.js'; + +// ============================================================================= +// Types +// ============================================================================= + +interface CacheMetadata { + readonly key: string; + readonly timestamp: string; + readonly version: string; +} + +interface CachedPipelineData { + readonly metadata: CacheMetadata; + readonly dataset: unknown; + readonly workflowConfig: WorkflowConfig | null; + readonly validation: PipelineResult['validation']; + readonly warnings: PipelineResult['warnings']; + readonly scanMetadata: PipelineResult['scanMetadata']; +} + +/** Cache format version β€” bump when the serialization format changes. */ +const CACHE_VERSION = '1'; + +// ============================================================================= +// Public API +// ============================================================================= + +/** + * Resolve the cache directory for a given base directory. 
+ */ +export function getCacheDir(baseDir: string): string { + return path.join(path.resolve(baseDir), 'node_modules', '.cache', 'delivery-process'); +} + +/** + * Compute a cache key from pipeline options and source file mtimes. + * + * The key captures: + * - Sorted list of source files with their modification times + * - Pipeline options that affect output (input patterns, features, baseDir, workflowPath) + * + * Any change to source files or pipeline config produces a different key. + */ +export async function computeCacheKey(opts: PipelineOptions): Promise { + const baseDir = path.resolve(opts.baseDir); + const hash = crypto.createHash('sha256'); + + // Hash pipeline options that affect dataset output + hash.update( + JSON.stringify({ + input: opts.input, + features: opts.features, + baseDir, + workflowPath: opts.workflowPath ?? null, + mergeConflictStrategy: opts.mergeConflictStrategy, + includeValidation: opts.includeValidation ?? true, + }) + ); + + // Resolve glob patterns to file lists and collect mtimes + const fileMtimes: string[] = []; + + for (const pattern of [...opts.input, ...opts.features]) { + const files = await glob(pattern, { cwd: baseDir, absolute: true }); + for (const file of files.sort()) { + try { + const stat = await fsp.stat(file); + fileMtimes.push(`${file}:${stat.mtimeMs}`); + } catch { + // File disappeared between glob and stat β€” include as changed + fileMtimes.push(`${file}:missing`); + } + } + } + + // Also include config file mtime if it exists (.ts or .js) + for (const configName of ['delivery-process.config.ts', 'delivery-process.config.js']) { + const configPath = path.join(baseDir, configName); + try { + const configStat = await fsp.stat(configPath); + fileMtimes.push(`${configPath}:${configStat.mtimeMs}`); + } catch { + fileMtimes.push(`${configPath}:absent`); + } + } + + hash.update(fileMtimes.join('\n')); + return hash.digest('hex'); +} + +/** + * Attempt to load a cached PipelineResult. 
+ * + * Returns `undefined` on any error (corrupt cache, key mismatch, etc.). + * Never throws β€” the caller should fall back to running the full pipeline. + */ +export async function tryLoadCache( + cacheKey: string, + cacheDir: string +): Promise<{ result: PipelineResult; ageMs: number } | undefined> { + try { + const cachePath = path.join(cacheDir, 'dataset.json'); + const raw = await fsp.readFile(cachePath, 'utf-8'); + const cached: CachedPipelineData = JSON.parse(raw) as CachedPipelineData; + + // Validate cache version and key + if (cached.metadata.version !== CACHE_VERSION) return undefined; + if (cached.metadata.key !== cacheKey) return undefined; + + // Reconstruct RuntimeMasterDataset from plain MasterDataset + WorkflowConfig + const dataset = cached.dataset as RuntimeMasterDataset; + if (cached.workflowConfig !== null) { + const workflow = createLoadedWorkflow(cached.workflowConfig); + // Assign workflow back onto the deserialized dataset (Maps are not JSON-serializable) + Object.assign(dataset, { workflow }); + } + + const ageMs = Date.now() - new Date(cached.metadata.timestamp).getTime(); + + return { + result: { + dataset, + validation: cached.validation, + warnings: cached.warnings, + scanMetadata: cached.scanMetadata, + }, + ageMs, + }; + } catch { + return undefined; + } +} + +/** + * Write a PipelineResult to the cache file. + * + * Strips the non-serializable `workflow` field and stores the `WorkflowConfig` + * separately for reconstruction on load. Uses atomic write (tmp + rename). + * + * Never throws β€” cache write failures are silently ignored. + */ +export async function writeCache( + result: PipelineResult, + cacheKey: string, + cacheDir: string +): Promise { + try { + await fsp.mkdir(cacheDir, { recursive: true }); + + // Extract WorkflowConfig (serializable) from LoadedWorkflow (has Maps) + const workflowConfig: WorkflowConfig | null = result.dataset.workflow?.config ?? 
null; + + // Strip the non-serializable workflow field from the dataset + const { workflow: _workflow, ...serializableDataset } = result.dataset; + + const cacheData: CachedPipelineData = { + metadata: { + key: cacheKey, + timestamp: new Date().toISOString(), + version: CACHE_VERSION, + }, + dataset: serializableDataset, + workflowConfig, + validation: result.validation, + warnings: result.warnings, + scanMetadata: result.scanMetadata, + }; + + const cachePath = path.join(cacheDir, 'dataset.json'); + const tmpPath = `${cachePath}.${process.pid}.${Date.now()}.tmp`; + + await fsp.writeFile(tmpPath, JSON.stringify(cacheData), 'utf-8'); + await fsp.rename(tmpPath, cachePath); + } catch { + // Cache write failure is not fatal β€” next run will rebuild + } +} + +/** + * Check whether a cache file exists (for dry-run reporting). + */ +export function cacheFileExists(cacheDir: string): { exists: boolean; sizeBytes?: number } { + try { + const cachePath = path.join(cacheDir, 'dataset.json'); + const stat = fs.statSync(cachePath); + return { exists: true, sizeBytes: stat.size }; + } catch { + return { exists: false }; + } +} diff --git a/src/cli/process-api.ts b/src/cli/process-api.ts index c10e25e7..9d5845ed 100644 --- a/src/cli/process-api.ts +++ b/src/cli/process-api.ts @@ -47,7 +47,19 @@ import { createProcessStateAPI } from '../api/process-state.js'; import type { ProcessStateAPI } from '../api/process-state.js'; import type { ExtractedPattern } from '../validation-schemas/index.js'; import type { TagRegistry } from '../validation-schemas/tag-registry.js'; -import { createSuccess, createError, QueryApiError } from '../api/types.js'; +import { + createSuccess, + createError, + QueryApiError, + type QueryMetadataExtra, +} from '../api/types.js'; +import { + computeCacheKey, + tryLoadCache, + writeCache, + getCacheDir, + cacheFileExists, +} from './dataset-cache.js'; import { handleCliError } from './error-handler.js'; import { printVersionAndExit } from './version.js'; 
import { CLI_SCHEMA } from './cli-schema.js'; @@ -117,6 +129,8 @@ import { type HandoffSessionType, } from '../api/handoff-generator.js'; import { execSync } from 'child_process'; +import { glob } from 'glob'; +import { startRepl } from './repl.js'; import { queryBusinessRules } from '../api/rules-query.js'; import type { RulesFilters } from '../api/rules-query.js'; @@ -136,6 +150,9 @@ interface ProcessAPICLIConfig { modifiers: OutputModifiers; format: 'json' | 'compact'; sessionType: SessionType | null; + noCache: boolean; + dryRun: boolean; + subcommandHelp: string | null; } // ============================================================================= @@ -155,6 +172,9 @@ function parseArgs(argv: string[] = process.argv.slice(2)): ProcessAPICLIConfig modifiers: { ...DEFAULT_OUTPUT_MODIFIERS }, format: 'json', sessionType: null, + noCache: false, + dryRun: false, + subcommandHelp: null, }; // Mutable modifiers for parsing @@ -176,7 +196,12 @@ function parseArgs(argv: string[] = process.argv.slice(2)): ProcessAPICLIConfig // Handle --help and --version regardless of position if (arg === '-h' || arg === '--help') { - config.help = true; + // If a subcommand was already parsed, this is per-subcommand help + if (config.subcommand !== null) { + config.subcommandHelp = config.subcommand; + } else { + config.help = true; + } continue; } if (arg === '-v' || arg === '--version') { @@ -184,6 +209,16 @@ function parseArgs(argv: string[] = process.argv.slice(2)): ProcessAPICLIConfig continue; } + // Handle cache and diagnostic flags regardless of position + if (arg === '--no-cache') { + config.noCache = true; + continue; + } + if (arg === '--dry-run') { + config.dryRun = true; + continue; + } + // Handle output modifiers regardless of position (before or after subcommand) if (arg === '--names-only') { namesOnly = true; @@ -435,6 +470,115 @@ Available API Methods (for 'query'): `); } +/** + * Per-subcommand help: shows usage, flags, and examples for a specific subcommand. 
+ * Looks up command narrative from CLI_SCHEMA.commandNarratives. + */ +function showSubcommandHelp(subcommand: string): void { + // Search for the command in commandNarratives groups + const narratives = CLI_SCHEMA.commandNarratives; + if (narratives !== undefined) { + for (const group of narratives) { + for (const cmd of group.commands) { + if (cmd.command === subcommand) { + console.log(`\nprocess-api ${subcommand} β€” ${cmd.description}\n`); + console.log(`Usage: ${cmd.usageExample}\n`); + if (cmd.details !== undefined) { + console.log(cmd.details); + console.log(''); + } + if (cmd.expectedOutput !== undefined) { + console.log(`Expected output: ${cmd.expectedOutput}\n`); + } + + // Show applicable option groups + const applicableGroups = getSubcommandOptionGroups(subcommand); + for (const groupKey of applicableGroups) { + const optGroup = CLI_SCHEMA[groupKey as keyof typeof CLI_SCHEMA] as + | CLIOptionGroup + | undefined; + if (optGroup !== undefined && 'options' in optGroup) { + console.log(`${optGroup.title}:\n`); + console.log(formatHelpOptions(optGroup)); + console.log(''); + } + } + return; + } + } + } + } + + // Fallback: subcommand not found in narratives + console.log(`\nNo detailed help available for '${subcommand}'.`); + console.log('Run process-api --help for the full command reference.\n'); +} + +/** + * Map subcommands to their applicable CLI option groups. 
+ */ +function getSubcommandOptionGroups(subcommand: string): readonly string[] { + const mapping: Record = { + context: ['sessionOptions'], + 'scope-validate': ['sessionOptions'], + list: ['listFilters', 'outputModifiers'], + search: ['outputModifiers'], + query: ['outputModifiers'], + status: ['outputModifiers'], + pattern: ['outputModifiers'], + stubs: ['outputModifiers'], + decisions: ['outputModifiers'], + pdr: ['outputModifiers'], + rules: ['outputModifiers'], + tags: ['outputModifiers'], + sources: ['outputModifiers'], + arch: ['outputModifiers'], + sequence: ['outputModifiers'], + }; + return mapping[subcommand] ?? []; +} + +/** + * Execute dry-run: show pipeline scope (files, config, cache) without processing. + */ +async function executeDryRun(opts: ProcessAPICLIConfig): Promise { + const baseDir = path.resolve(opts.baseDir); + + // Resolve globs to file lists + const tsFiles = await glob(opts.input, { cwd: baseDir }); + const featureFiles = await glob(opts.features, { cwd: baseDir }); + + // Check config file + const configPath = path.join(baseDir, 'delivery-process.config.ts'); + const hasConfig = fs.existsSync(configPath); + + // Check cache status + const cacheDir = getCacheDir(opts.baseDir); + const cacheInfo = cacheFileExists(cacheDir); + + console.log('=== DRY RUN ==='); + console.log( + `Config: ${hasConfig ? 'delivery-process.config.ts (auto-detected)' : 'none (filesystem fallback)'}` + ); + console.log(`Base dir: ${baseDir}`); + console.log(`Input patterns: ${opts.input.join(', ')}`); + console.log(`Feature patterns: ${opts.features.join(', ')}`); + console.log(`TypeScript files: ${tsFiles.length}`); + console.log(`Feature files: ${featureFiles.length}`); + console.log(`Workflow: ${opts.workflowPath ?? 'default (6-phase-standard)'}`); + if (cacheInfo.exists) { + const sizeKb = + cacheInfo.sizeBytes !== undefined + ? 
`${(cacheInfo.sizeBytes / 1024).toFixed(1)}KB` + : 'unknown'; + console.log(`Cache: ${path.join(cacheDir, 'dataset.json')} (${sizeKb})`); + } else { + console.log('Cache: none'); + } + console.log(`Subcommand: ${opts.subcommand ?? '(none)'}`); + console.log('\nNo pipeline processing performed.'); +} + // ============================================================================= // Config File Default Resolution // ============================================================================= @@ -1558,6 +1702,27 @@ async function routeSubcommand(ctx: RouteContext): Promise { // Main // ============================================================================= +/** + * Build extended query metadata from pipeline results. + */ +function buildQueryMetadataExtra( + validation: ValidationSummary, + cacheHit: boolean, + cacheAgeMs: number | undefined, + pipelineMs: number +): QueryMetadataExtra { + return { + validation: { + danglingReferenceCount: validation.danglingReferences.length, + malformedPatternCount: validation.malformedPatterns.length, + unknownStatusCount: validation.unknownStatuses.length, + warningCount: validation.warningCount, + }, + cache: cacheAgeMs !== undefined ? { hit: cacheHit, ageMs: cacheAgeMs } : { hit: cacheHit }, + pipelineMs, + }; +} + async function main(): Promise { const opts = parseArgs(); @@ -1570,6 +1735,24 @@ async function main(): Promise { process.exit(opts.help ? 
0 : 1); } + // Per-subcommand help (e.g., `process-api context --help`) + if (opts.subcommandHelp !== null) { + showSubcommandHelp(opts.subcommandHelp); + process.exit(0); + } + + // REPL mode: interactive multi-query session (manages its own pipeline lifecycle) + if (opts.subcommand === 'repl') { + await applyConfigDefaults(opts); + await startRepl({ + input: opts.input, + features: opts.features, + baseDir: opts.baseDir, + workflowPath: opts.workflowPath, + }); + return; + } + // Validate output modifiers before any expensive work validateModifiers(opts.modifiers); @@ -1597,8 +1780,46 @@ async function main(): Promise { process.exit(1); } - // Build pipeline (steps 1-8) - const { dataset: masterDataset, validation } = await buildPipeline(opts); + // Dry-run: show pipeline scope without executing + if (opts.dryRun) { + await executeDryRun(opts); + return; + } + + // Pipeline execution with caching + const startMs = performance.now(); + let pipelineResult: PipelineResult; + let cacheHit = false; + let cacheAgeMs: number | undefined; + + if (!opts.noCache) { + const cacheDir = getCacheDir(opts.baseDir); + const cacheKey = await computeCacheKey({ + input: opts.input, + features: opts.features, + baseDir: opts.baseDir, + mergeConflictStrategy: 'fatal', + ...(opts.workflowPath !== null ? 
{ workflowPath: opts.workflowPath } : {}), + }); + + const cached = await tryLoadCache(cacheKey, cacheDir); + if (cached !== undefined) { + pipelineResult = cached.result; + cacheHit = true; + cacheAgeMs = cached.ageMs; + } else { + pipelineResult = await buildPipeline(opts); + void writeCache(pipelineResult, cacheKey, cacheDir); + } + } else { + pipelineResult = await buildPipeline(opts); + } + + const pipelineMs = Math.round(performance.now() - startMs); + const { dataset: masterDataset, validation } = pipelineResult; + + // Build extended metadata for JSON responses + const extra = buildQueryMetadataExtra(validation, cacheHit, cacheAgeMs, pipelineMs); // Create ProcessStateAPI const api = createProcessStateAPI(masterDataset); @@ -1623,7 +1844,7 @@ async function main(): Promise { if (typeof result === 'string') { console.log(result); } else { - const envelope = createSuccess(result, masterDataset.counts.total); + const envelope = createSuccess(result, masterDataset.counts.total, extra); const output = formatOutput(envelope, opts.format); console.log(output); } diff --git a/src/cli/repl.ts b/src/cli/repl.ts new file mode 100644 index 00000000..76f1ee9a --- /dev/null +++ b/src/cli/repl.ts @@ -0,0 +1,295 @@ +/** + * @libar-docs + * @libar-docs-cli + * @libar-docs-pattern ReplMode + * @libar-docs-status active + * @libar-docs-implements DataAPICLIErgonomics + * @libar-docs-arch-role service + * @libar-docs-arch-context cli + * @libar-docs-arch-layer application + * @libar-docs-uses PipelineFactory, ProcessStateAPI + * + * ## REPL Mode - Interactive Multi-Query Pipeline Session + * + * Loads the pipeline once and accepts multiple queries on stdin. + * Eliminates per-query pipeline overhead for design sessions with 10-20 + * exploratory queries in sequence. 
+ * + * ### Special Commands + * + * - `quit` / `exit` β€” exit the REPL + * - `reload` β€” rebuild the pipeline from fresh sources + * - `help` β€” list available subcommands + */ + +import * as readline from 'node:readline/promises'; +import * as path from 'path'; +import { + buildMasterDataset, + type PipelineResult, + type RuntimeMasterDataset, + type ValidationSummary, +} from '../generators/pipeline/index.js'; +import { createProcessStateAPI } from '../api/process-state.js'; +import type { ProcessStateAPI } from '../api/process-state.js'; +import { QueryApiError, createSuccess, createError } from '../api/types.js'; +import { formatOutput } from './output-pipeline.js'; +import { + assembleContext, + buildDepTree, + buildOverview, + isValidSessionType, + type SessionType, +} from '../api/context-assembler.js'; +import { formatContextBundle, formatDepTree, formatOverview } from '../api/context-formatter.js'; + +// ============================================================================= +// Types +// ============================================================================= + +export interface ReplOptions { + readonly input: readonly string[]; + readonly features: readonly string[]; + readonly baseDir: string; + readonly workflowPath: string | null; +} + +interface ReplState { + api: ProcessStateAPI; + dataset: RuntimeMasterDataset; + validation: ValidationSummary; +} + +// ============================================================================= +// Pipeline Loading +// ============================================================================= + +async function loadPipeline(opts: ReplOptions): Promise { + const result = await buildMasterDataset({ + input: opts.input, + features: opts.features, + baseDir: opts.baseDir, + mergeConflictStrategy: 'fatal', + ...(opts.workflowPath !== null ? 
{ workflowPath: opts.workflowPath } : {}), + }); + if (!result.ok) { + throw new Error(`Pipeline error [${result.error.step}]: ${result.error.message}`); + } + return result.value; +} + +// ============================================================================= +// Command Dispatch +// ============================================================================= + +function dispatchCommand(line: string, state: ReplState, opts: ReplOptions): string { + const parts = line.trim().split(/\s+/); + const subcommand = parts[0]; + const subArgs = parts.slice(1); + + if (subcommand === undefined || subcommand === '') { + return ''; + } + + switch (subcommand) { + case 'status': { + const data = { + counts: state.api.getStatusCounts(), + completionPercentage: state.api.getCompletionPercentage(), + }; + return formatOutput(createSuccess(data, state.dataset.counts.total), 'json'); + } + + case 'overview': { + const bundle = buildOverview(state.dataset); + return formatOverview(bundle); + } + + case 'context': { + const patternArg = subArgs[0]; + if (patternArg === undefined) { + throw new QueryApiError('INVALID_ARGUMENT', 'Usage: context [--session ]'); + } + let sessionType: SessionType = 'planning'; + const sessionIdx = subArgs.indexOf('--session'); + const sessionVal = sessionIdx !== -1 ? subArgs[sessionIdx + 1] : undefined; + if (sessionVal !== undefined && isValidSessionType(sessionVal)) { + sessionType = sessionVal; + } + const baseDir = path.resolve(opts.baseDir); + const bundle = assembleContext(state.dataset, state.api, { + patterns: [patternArg], + sessionType, + baseDir, + }); + return formatContextBundle(bundle); + } + + case 'dep-tree': { + const patternArg = subArgs[0]; + if (patternArg === undefined) { + throw new QueryApiError('INVALID_ARGUMENT', 'Usage: dep-tree [--depth N]'); + } + let depth = 3; + const depthIdx = subArgs.indexOf('--depth'); + const depthVal = depthIdx !== -1 ? 
subArgs[depthIdx + 1] : undefined; + if (depthVal !== undefined) { + const parsed = parseInt(depthVal, 10); + if (!isNaN(parsed) && parsed > 0) { + depth = parsed; + } + } + const tree = buildDepTree(state.dataset, { + pattern: patternArg, + maxDepth: depth, + includeImplementationDeps: false, + }); + return formatDepTree(tree); + } + + case 'pattern': { + const patternArg = subArgs[0]; + if (patternArg === undefined) { + throw new QueryApiError('INVALID_ARGUMENT', 'Usage: pattern '); + } + const data = state.api.getPattern(patternArg); + return formatOutput(createSuccess(data, state.dataset.counts.total), 'json'); + } + + case 'list': { + const data = state.api.getPatternsByNormalizedStatus('planned'); + return formatOutput( + createSuccess( + data.map((p) => p.patternName ?? 'unknown'), + state.dataset.counts.total + ), + 'json' + ); + } + + default: + return `Unknown REPL command: ${subcommand}\nAvailable: status, overview, context, dep-tree, pattern, list, reload, help, quit`; + } +} + +// ============================================================================= +// REPL Entry Point +// ============================================================================= + +/** + * Start the interactive REPL. Loads the pipeline once and accepts queries on stdin. + * + * When stdin is a pipe (non-TTY), all lines are buffered and processed sequentially. + * The `reload` command is async, so we collect all lines upfront when piped to + * avoid losing buffered input during async operations. 
+ */ +export async function startRepl(opts: ReplOptions): Promise { + console.error('Loading pipeline...'); + const pipeline = await loadPipeline(opts); + const state: ReplState = { + api: createProcessStateAPI(pipeline.dataset), + dataset: pipeline.dataset, + validation: pipeline.validation, + }; + console.error( + `Pipeline loaded: ${state.dataset.counts.total} patterns ` + + `(${state.dataset.counts.completed} completed, ` + + `${state.dataset.counts.active} active, ` + + `${state.dataset.counts.planned} planned)` + ); + + if (process.stdin.isTTY) { + await runInteractiveRepl(state, opts); + } else { + await runPipedRepl(state, opts); + } +} + +/** + * Interactive REPL with prompts (TTY mode). + */ +async function runInteractiveRepl(state: ReplState, opts: ReplOptions): Promise { + const rl = readline.createInterface({ + input: process.stdin, + output: process.stderr, + prompt: 'process-api> ', + }); + + rl.prompt(); + + for await (const line of rl) { + const shouldExit = await processLine(line.trim(), state, opts); + if (shouldExit) break; + rl.prompt(); + } + + rl.close(); +} + +/** + * Piped REPL (non-TTY mode). Collects all lines first, then processes sequentially. + * This prevents losing buffered lines during async operations like reload. + */ +async function runPipedRepl(state: ReplState, opts: ReplOptions): Promise { + const rl = readline.createInterface({ input: process.stdin }); + const lines: string[] = []; + + for await (const line of rl) { + lines.push(line.trim()); + } + + rl.close(); + + for (const trimmed of lines) { + const shouldExit = await processLine(trimmed, state, opts); + if (shouldExit) break; + } +} + +/** + * Process a single REPL input line. Returns true if the REPL should exit. 
+ */ +async function processLine(trimmed: string, state: ReplState, opts: ReplOptions): Promise { + if (trimmed === 'quit' || trimmed === 'exit') { + return true; + } + + if (trimmed === 'reload') { + console.error('Reloading pipeline...'); + try { + const fresh = await loadPipeline(opts); + state.api = createProcessStateAPI(fresh.dataset); + state.dataset = fresh.dataset; + state.validation = fresh.validation; + console.error(`Reloaded: ${state.dataset.counts.total} patterns`); + } catch (err) { + console.error(`Reload failed: ${err instanceof Error ? err.message : String(err)}`); + } + return false; + } + + if (trimmed === 'help') { + console.log('Available commands: status, overview, context, dep-tree, pattern, list'); + console.log('Special: reload, help, quit/exit'); + return false; + } + + if (trimmed === '') { + return false; + } + + try { + const output = dispatchCommand(trimmed, state, opts); + if (output !== '') { + console.log(output); + } + } catch (err) { + if (err instanceof QueryApiError) { + console.log(formatOutput(createError(err.code, err.message), 'json')); + } else { + console.error(`Error: ${err instanceof Error ? err.message : String(err)}`); + } + } + + return false; +} diff --git a/tests/features/cli/data-api-cache.feature b/tests/features/cli/data-api-cache.feature new file mode 100644 index 00000000..630f33e8 --- /dev/null +++ b/tests/features/cli/data-api-cache.feature @@ -0,0 +1,43 @@ +@libar-docs +@libar-docs-pattern:ProcessApiCliCache +@libar-docs-implements:DataAPICLIErgonomics +@libar-docs-status:active +@libar-docs-product-area:DataAPI +@cli @process-api @cache +Feature: Process API CLI - Dataset Cache + MasterDataset caching between CLI invocations: cache hits, mtime invalidation, and --no-cache bypass. 
+ + Background: + Given a temporary working directory + + # ============================================================================ + # RULE 1: Cache Hit on Unchanged Sources + # ============================================================================ + + Rule: MasterDataset is cached between invocations + + **Invariant:** When source files have not changed between CLI invocations, the second invocation must use the cached MasterDataset and report cache.hit as true with reduced pipelineMs. + **Rationale:** The pipeline rebuild costs 2-5 seconds per invocation. Caching eliminates this cost for repeated queries against unchanged sources, which is the common case during interactive AI sessions. + + @happy-path + Scenario: Second query uses cached dataset + Given TypeScript files with pattern annotations + When running status and capturing the first result + And running status and capturing the second result + Then the second result metadata has cache.hit true + And the second result pipelineMs is less than 500 + + @happy-path + Scenario: Cache invalidated on source file change + Given TypeScript files with pattern annotations + When running status and capturing the first result + And a source file mtime is updated + And running status and capturing the second result + Then the second result metadata has cache.hit false + + @happy-path + Scenario: No-cache flag bypasses cache + Given TypeScript files with pattern annotations + When running status and capturing the first result + And running status with --no-cache and capturing the second result + Then the second result metadata has cache.hit false diff --git a/tests/features/cli/data-api-dryrun.feature b/tests/features/cli/data-api-dryrun.feature new file mode 100644 index 00000000..8b151163 --- /dev/null +++ b/tests/features/cli/data-api-dryrun.feature @@ -0,0 +1,28 @@ +@libar-docs +@libar-docs-pattern:ProcessApiCliDryRun +@libar-docs-implements:DataAPICLIErgonomics +@libar-docs-status:active 
+@libar-docs-product-area:DataAPI +@cli @process-api @dry-run +Feature: Process API CLI - Dry Run + Dry-run mode shows pipeline scope without processing data. + + Background: + Given a temporary working directory + + # ============================================================================ + # RULE 1: Dry-Run Pipeline Scope + # ============================================================================ + + Rule: Dry-run shows pipeline scope without processing + + **Invariant:** The --dry-run flag must display file counts, config status, and cache status without executing the pipeline. Output must contain the DRY RUN marker and must not contain a JSON success envelope. + **Rationale:** Dry-run enables users to verify their input patterns resolve to expected files before committing to the 2-5s pipeline cost, which is especially valuable when debugging glob patterns or config auto-detection. + + @happy-path + Scenario: Dry-run shows file counts + Given TypeScript files with pattern annotations + When running "process-api -i 'src/**/*.ts' --dry-run status" + Then exit code is 0 + And stdout contains dry run marker, file counts, config, and cache status + And stdout does not contain "success" diff --git a/tests/features/cli/data-api-help.feature b/tests/features/cli/data-api-help.feature new file mode 100644 index 00000000..62388303 --- /dev/null +++ b/tests/features/cli/data-api-help.feature @@ -0,0 +1,39 @@ +@libar-docs +@libar-docs-pattern:ProcessApiCliHelp +@libar-docs-implements:DataAPICLIErgonomics +@libar-docs-status:active +@libar-docs-product-area:DataAPI +@cli @process-api @help +Feature: Process API CLI - Per-Subcommand Help + Per-subcommand help displays usage, flags, and examples for individual subcommands. 
+ + Background: + Given a temporary working directory + + # ============================================================================ + # RULE 1: Per-Subcommand Help + # ============================================================================ + + Rule: Per-subcommand help shows usage and flags + + **Invariant:** Running any subcommand with --help must display usage information specific to that subcommand, including applicable flags and examples. Unknown subcommands must fall back to a descriptive message. + **Rationale:** Per-subcommand help replaces the need to scroll through full --help output and provides contextual guidance for subcommand-specific flags like --session. + + @happy-path + Scenario: Per-subcommand help for context + When running "process-api context --help" + Then exit code is 0 + And stdout contains context usage and session flag + And stdout contains "Usage:" + + @happy-path + Scenario: Global help still works + When running "process-api --help" + Then exit code is 0 + And stdout contains "Usage:" + + @validation + Scenario: Unknown subcommand help + When running "process-api foobar --help" + Then exit code is 0 + And stdout contains "No detailed help" diff --git a/tests/features/cli/data-api-metadata.feature b/tests/features/cli/data-api-metadata.feature new file mode 100644 index 00000000..f1be8ff9 --- /dev/null +++ b/tests/features/cli/data-api-metadata.feature @@ -0,0 +1,36 @@ +@libar-docs +@libar-docs-pattern:ProcessApiCliMetadata +@libar-docs-implements:DataAPICLIErgonomics +@libar-docs-status:active +@libar-docs-product-area:DataAPI +@cli @process-api @metadata +Feature: Process API CLI - Response Metadata + Response metadata includes validation summary and pipeline timing for diagnostics. 
+ + Background: + Given a temporary working directory + + # ============================================================================ + # RULE 1: Validation Summary in Metadata + # ============================================================================ + + Rule: Response metadata includes validation summary + + **Invariant:** Every JSON response envelope must include a metadata.validation object with danglingReferenceCount, malformedPatternCount, unknownStatusCount, and warningCount fields, plus a numeric pipelineMs timing. + **Rationale:** Consumers use validation counts to detect annotation quality degradation without running a separate validation pass. Pipeline timing enables performance regression detection in CI. + + @acceptance-criteria @happy-path + Scenario: Validation summary in response metadata + Given TypeScript files with pattern annotations + When running "process-api -i 'src/**/*.ts' status" + Then exit code is 0 + And stdout is valid JSON with key "metadata" + And metadata has a validation object with count fields + And metadata has a numeric pipelineMs field + + @happy-path + Scenario: Pipeline timing in metadata + Given TypeScript files with pattern annotations + When running "process-api -i 'src/**/*.ts' status" + Then exit code is 0 + And metadata has a numeric pipelineMs field diff --git a/tests/features/cli/data-api-repl.feature b/tests/features/cli/data-api-repl.feature new file mode 100644 index 00000000..d14d4760 --- /dev/null +++ b/tests/features/cli/data-api-repl.feature @@ -0,0 +1,51 @@ +@libar-docs +@libar-docs-pattern:ProcessApiCliRepl +@libar-docs-implements:DataAPICLIErgonomics +@libar-docs-status:active +@libar-docs-product-area:DataAPI +@cli @process-api @repl +Feature: Process API CLI - REPL Mode + Interactive REPL mode keeps the pipeline loaded for multi-query sessions and supports reload. 
+ + Background: + Given a temporary working directory + + # ============================================================================ + # RULE 1: Multi-Query Sessions + # ============================================================================ + + Rule: REPL mode accepts multiple queries on a single pipeline load + + **Invariant:** REPL mode loads the pipeline once and accepts multiple queries on stdin, eliminating per-query pipeline overhead. + **Rationale:** Design sessions involve 10-20 exploratory queries in sequence. REPL mode eliminates per-query pipeline overhead entirely. + + @acceptance-criteria @happy-path + Scenario: REPL accepts multiple queries + Given TypeScript files with pattern annotations + When piping "status" then "list" then "quit" to the REPL + Then the REPL output contains status JSON + And the REPL output contains list JSON + And the REPL exits cleanly + + @acceptance-criteria @happy-path + Scenario: REPL shows help output + Given TypeScript files with pattern annotations + When piping "help" then "quit" to the REPL + Then the REPL output contains available commands + + # ============================================================================ + # RULE 2: Pipeline Reload + # ============================================================================ + + Rule: REPL reload rebuilds the pipeline from fresh sources + + **Invariant:** The reload command rebuilds the pipeline from fresh sources and subsequent queries use the new dataset. + **Rationale:** During implementation sessions, source files change frequently. Reload allows refreshing without restarting the REPL. 
+ + @acceptance-criteria @happy-path + Scenario: REPL reloads pipeline on command + Given TypeScript files with pattern annotations + When piping "status" then "reload" then "status" then "quit" to the REPL + Then the REPL stderr contains "Reloading pipeline" + And the REPL stderr contains "Reloaded" + And the REPL output contains two status responses diff --git a/tests/steps/cli/data-api-cache.steps.ts b/tests/steps/cli/data-api-cache.steps.ts new file mode 100644 index 00000000..f73eba71 --- /dev/null +++ b/tests/steps/cli/data-api-cache.steps.ts @@ -0,0 +1,199 @@ +/** + * Data API CLI Cache Step Definitions + * + * BDD step definitions for testing MasterDataset caching + * between CLI invocations: cache hits, mtime invalidation, + * and --no-cache bypass. + * + * @libar-docs + * @libar-docs-implements DataAPICLIErgonomics + */ + +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import { loadFeature, describeFeature } from '@amiceli/vitest-cucumber'; +import { expect } from 'vitest'; +import { + type CLITestState, + type CLIResult, + initState, + getTempDir, + runCLICommand, + getResult, + writePatternFiles, + createTempDir, +} from '../../support/helpers/process-api-state.js'; + +// ============================================================================= +// Extended State for Cache Tests +// ============================================================================= + +interface CacheTestState extends CLITestState { + firstResult: CLIResult | null; + secondResult: CLIResult | null; +} + +function initCacheState(): CacheTestState { + const base = initState(); + return { + ...base, + firstResult: null, + secondResult: null, + }; +} + +function getCacheState(state: CacheTestState | null): CacheTestState { + if (!state) throw new Error('Cache test state not initialized'); + return state; +} + +// ============================================================================= +// JSON Metadata Parsing +// 
============================================================================= + +interface ParsedMetadata { + cache?: { + hit: boolean; + ageMs?: number; + }; + pipelineMs?: number; +} + +function parseMetadata(result: CLIResult): ParsedMetadata { + const parsed = JSON.parse(result.stdout) as { metadata?: ParsedMetadata }; + if (!parsed.metadata) { + throw new Error('No metadata in response JSON'); + } + return parsed.metadata; +} + +// ============================================================================= +// Module-level state (reset per scenario) +// ============================================================================= + +let state: CacheTestState | null = null; + +// ============================================================================= +// Feature Definition +// ============================================================================= + +const feature = await loadFeature('tests/features/cli/data-api-cache.feature'); + +describeFeature(feature, ({ Background, Rule, AfterEachScenario }) => { + // --------------------------------------------------------------------------- + // Cleanup + // --------------------------------------------------------------------------- + + AfterEachScenario(async () => { + if (state?.tempContext) { + await state.tempContext.cleanup(); + } + state = null; + }); + + // --------------------------------------------------------------------------- + // Background + // --------------------------------------------------------------------------- + + Background(({ Given }) => { + Given('a temporary working directory', async () => { + state = initCacheState(); + state.tempContext = await createTempDir({ prefix: 'cli-cache-test-' }); + }); + }); + + // --------------------------------------------------------------------------- + // Rule: MasterDataset is cached between invocations + // --------------------------------------------------------------------------- + + Rule('MasterDataset is cached between invocations', ({ 
RuleScenario }) => { + RuleScenario('Second query uses cached dataset', ({ Given, When, Then, And }) => { + Given('TypeScript files with pattern annotations', async () => { + await writePatternFiles(state); + }); + + When('running status and capturing the first result', async () => { + await runCLICommand(state, "process-api -i 'src/**/*.ts' status"); + getCacheState(state).firstResult = getResult(state); + }); + + And('running status and capturing the second result', async () => { + // Reset result before the second run + getCacheState(state).result = null; + await runCLICommand(state, "process-api -i 'src/**/*.ts' status"); + getCacheState(state).secondResult = getResult(state); + }); + + Then('the second result metadata has cache.hit true', () => { + const s = getCacheState(state); + const metadata = parseMetadata(s.secondResult!); + expect(metadata.cache).toBeDefined(); + expect(metadata.cache!.hit).toBe(true); + }); + + And('the second result pipelineMs is less than 500', () => { + const s = getCacheState(state); + const metadata = parseMetadata(s.secondResult!); + expect(metadata.pipelineMs).toBeDefined(); + expect(metadata.pipelineMs!).toBeLessThan(500); + }); + }); + + RuleScenario('Cache invalidated on source file change', ({ Given, When, Then, And }) => { + Given('TypeScript files with pattern annotations', async () => { + await writePatternFiles(state); + }); + + When('running status and capturing the first result', async () => { + await runCLICommand(state, "process-api -i 'src/**/*.ts' status"); + getCacheState(state).firstResult = getResult(state); + }); + + And('a source file mtime is updated', () => { + const dir = getTempDir(state); + const filePath = path.join(dir, 'src', 'completed.ts'); + // Advance mtime by 2 seconds to ensure cache key changes + const now = new Date(); + const future = new Date(now.getTime() + 2000); + fs.utimesSync(filePath, future, future); + }); + + And('running status and capturing the second result', async () => { + 
getCacheState(state).result = null; + await runCLICommand(state, "process-api -i 'src/**/*.ts' status"); + getCacheState(state).secondResult = getResult(state); + }); + + Then('the second result metadata has cache.hit false', () => { + const s = getCacheState(state); + const metadata = parseMetadata(s.secondResult!); + expect(metadata.cache).toBeDefined(); + expect(metadata.cache!.hit).toBe(false); + }); + }); + + RuleScenario('No-cache flag bypasses cache', ({ Given, When, Then, And }) => { + Given('TypeScript files with pattern annotations', async () => { + await writePatternFiles(state); + }); + + When('running status and capturing the first result', async () => { + await runCLICommand(state, "process-api -i 'src/**/*.ts' status"); + getCacheState(state).firstResult = getResult(state); + }); + + And('running status with --no-cache and capturing the second result', async () => { + getCacheState(state).result = null; + await runCLICommand(state, "process-api -i 'src/**/*.ts' --no-cache status"); + getCacheState(state).secondResult = getResult(state); + }); + + Then('the second result metadata has cache.hit false', () => { + const s = getCacheState(state); + const metadata = parseMetadata(s.secondResult!); + expect(metadata.cache).toBeDefined(); + expect(metadata.cache!.hit).toBe(false); + }); + }); + }); +}); diff --git a/tests/steps/cli/data-api-dryrun.steps.ts b/tests/steps/cli/data-api-dryrun.steps.ts new file mode 100644 index 00000000..474b87b5 --- /dev/null +++ b/tests/steps/cli/data-api-dryrun.steps.ts @@ -0,0 +1,88 @@ +/** + * Data API CLI Dry Run Step Definitions + * + * BDD step definitions for testing --dry-run mode: + * pipeline scope display without processing. 
+ * + * @libar-docs + * @libar-docs-implements DataAPICLIErgonomics + */ + +import { loadFeature, describeFeature } from '@amiceli/vitest-cucumber'; +import { expect } from 'vitest'; +import { + type CLITestState, + initState, + getResult, + runCLICommand, + writePatternFiles, + createTempDir, +} from '../../support/helpers/process-api-state.js'; + +// ============================================================================= +// Module-level state (reset per scenario) +// ============================================================================= + +let state: CLITestState | null = null; + +// ============================================================================= +// Feature Definition +// ============================================================================= + +const feature = await loadFeature('tests/features/cli/data-api-dryrun.feature'); + +describeFeature(feature, ({ Background, Rule, AfterEachScenario }) => { + // --------------------------------------------------------------------------- + // Cleanup + // --------------------------------------------------------------------------- + + AfterEachScenario(async () => { + if (state?.tempContext) { + await state.tempContext.cleanup(); + } + state = null; + }); + + // --------------------------------------------------------------------------- + // Background + // --------------------------------------------------------------------------- + + Background(({ Given }) => { + Given('a temporary working directory', async () => { + state = initState(); + state.tempContext = await createTempDir({ prefix: 'cli-dryrun-test-' }); + }); + }); + + // --------------------------------------------------------------------------- + // Rule: Dry-run shows pipeline scope without processing + // --------------------------------------------------------------------------- + + Rule('Dry-run shows pipeline scope without processing', ({ RuleScenario }) => { + RuleScenario('Dry-run shows file counts', ({ Given, When, 
Then, And }) => { + Given('TypeScript files with pattern annotations', async () => { + await writePatternFiles(state); + }); + + When('running {string}', async (_ctx: unknown, cmd: string) => { + await runCLICommand(state, cmd); + }); + + Then('exit code is {int}', (_ctx: unknown, code: number) => { + expect(getResult(state).exitCode).toBe(code); + }); + + And('stdout contains dry run marker, file counts, config, and cache status', () => { + const stdout = getResult(state).stdout; + expect(stdout).toContain('DRY RUN'); + expect(stdout).toContain('TypeScript files:'); + expect(stdout).toContain('Config:'); + expect(stdout).toContain('Cache:'); + }); + + And('stdout does not contain {string}', (_ctx: unknown, text: string) => { + expect(getResult(state).stdout).not.toContain(text); + }); + }); + }); +}); diff --git a/tests/steps/cli/data-api-help.steps.ts b/tests/steps/cli/data-api-help.steps.ts new file mode 100644 index 00000000..99f23af5 --- /dev/null +++ b/tests/steps/cli/data-api-help.steps.ts @@ -0,0 +1,109 @@ +/** + * Data API CLI Per-Subcommand Help Step Definitions + * + * BDD step definitions for testing per-subcommand help output, + * global help, and unknown subcommand help fallback. 
+ * + * @libar-docs + * @libar-docs-implements DataAPICLIErgonomics + */ + +import { loadFeature, describeFeature } from '@amiceli/vitest-cucumber'; +import { expect } from 'vitest'; +import { + type CLITestState, + initState, + getResult, + runCLICommand, + createTempDir, +} from '../../support/helpers/process-api-state.js'; + +// ============================================================================= +// Module-level state (reset per scenario) +// ============================================================================= + +let state: CLITestState | null = null; + +// ============================================================================= +// Feature Definition +// ============================================================================= + +const feature = await loadFeature('tests/features/cli/data-api-help.feature'); + +describeFeature(feature, ({ Background, Rule, AfterEachScenario }) => { + // --------------------------------------------------------------------------- + // Cleanup + // --------------------------------------------------------------------------- + + AfterEachScenario(async () => { + if (state?.tempContext) { + await state.tempContext.cleanup(); + } + state = null; + }); + + // --------------------------------------------------------------------------- + // Background + // --------------------------------------------------------------------------- + + Background(({ Given }) => { + Given('a temporary working directory', async () => { + state = initState(); + state.tempContext = await createTempDir({ prefix: 'cli-help-test-' }); + }); + }); + + // --------------------------------------------------------------------------- + // Rule: Per-subcommand help shows usage and flags + // --------------------------------------------------------------------------- + + Rule('Per-subcommand help shows usage and flags', ({ RuleScenario }) => { + RuleScenario('Per-subcommand help for context', ({ When, Then, And }) => { + When('running 
{string}', async (_ctx: unknown, cmd: string) => { + await runCLICommand(state, cmd); + }); + + Then('exit code is {int}', (_ctx: unknown, code: number) => { + expect(getResult(state).exitCode).toBe(code); + }); + + And('stdout contains context usage and session flag', () => { + const stdout = getResult(state).stdout; + expect(stdout).toContain('context'); + expect(stdout).toContain('--session'); + }); + + And('stdout contains {string}', (_ctx: unknown, text: string) => { + expect(getResult(state).stdout).toContain(text); + }); + }); + + RuleScenario('Global help still works', ({ When, Then, And }) => { + When('running {string}', async (_ctx: unknown, cmd: string) => { + await runCLICommand(state, cmd); + }); + + Then('exit code is {int}', (_ctx: unknown, code: number) => { + expect(getResult(state).exitCode).toBe(code); + }); + + And('stdout contains {string}', (_ctx: unknown, text: string) => { + expect(getResult(state).stdout).toContain(text); + }); + }); + + RuleScenario('Unknown subcommand help', ({ When, Then, And }) => { + When('running {string}', async (_ctx: unknown, cmd: string) => { + await runCLICommand(state, cmd); + }); + + Then('exit code is {int}', (_ctx: unknown, code: number) => { + expect(getResult(state).exitCode).toBe(code); + }); + + And('stdout contains {string}', (_ctx: unknown, text: string) => { + expect(getResult(state).stdout).toContain(text); + }); + }); + }); +}); diff --git a/tests/steps/cli/data-api-metadata.steps.ts b/tests/steps/cli/data-api-metadata.steps.ts new file mode 100644 index 00000000..24efea90 --- /dev/null +++ b/tests/steps/cli/data-api-metadata.steps.ts @@ -0,0 +1,147 @@ +/** + * Data API CLI Metadata Step Definitions + * + * BDD step definitions for testing response metadata: + * validation summary counts and pipeline timing. 
+ * + * @libar-docs + * @libar-docs-implements DataAPICLIErgonomics + */ + +import { loadFeature, describeFeature } from '@amiceli/vitest-cucumber'; +import { expect } from 'vitest'; +import { + type CLITestState, + initState, + getResult, + runCLICommand, + writePatternFiles, + createTempDir, +} from '../../support/helpers/process-api-state.js'; + +// ============================================================================= +// JSON Metadata Parsing +// ============================================================================= + +interface ValidationMetadata { + danglingReferenceCount: number; + malformedPatternCount: number; + unknownStatusCount: number; + warningCount: number; +} + +interface ResponseMetadata { + validation?: ValidationMetadata; + pipelineMs?: number; + cache?: { + hit: boolean; + ageMs?: number; + }; +} + +function parseResponseMetadata(stdout: string): ResponseMetadata { + const parsed = JSON.parse(stdout) as { metadata?: ResponseMetadata }; + if (!parsed.metadata) { + throw new Error('No metadata in response JSON'); + } + return parsed.metadata; +} + +// ============================================================================= +// Module-level state (reset per scenario) +// ============================================================================= + +let state: CLITestState | null = null; + +// ============================================================================= +// Feature Definition +// ============================================================================= + +const feature = await loadFeature('tests/features/cli/data-api-metadata.feature'); + +describeFeature(feature, ({ Background, Rule, AfterEachScenario }) => { + // --------------------------------------------------------------------------- + // Cleanup + // --------------------------------------------------------------------------- + + AfterEachScenario(async () => { + if (state?.tempContext) { + await state.tempContext.cleanup(); + } + state = null; + }); 
+ + // --------------------------------------------------------------------------- + // Background + // --------------------------------------------------------------------------- + + Background(({ Given }) => { + Given('a temporary working directory', async () => { + state = initState(); + state.tempContext = await createTempDir({ prefix: 'cli-metadata-test-' }); + }); + }); + + // --------------------------------------------------------------------------- + // Rule: Response metadata includes validation summary + // --------------------------------------------------------------------------- + + Rule('Response metadata includes validation summary', ({ RuleScenario }) => { + RuleScenario('Validation summary in response metadata', ({ Given, When, Then, And }) => { + Given('TypeScript files with pattern annotations', async () => { + await writePatternFiles(state); + }); + + When('running {string}', async (_ctx: unknown, cmd: string) => { + await runCLICommand(state, cmd); + }); + + Then('exit code is {int}', (_ctx: unknown, code: number) => { + expect(getResult(state).exitCode).toBe(code); + }); + + And('stdout is valid JSON with key {string}', (_ctx: unknown, key: string) => { + const result = getResult(state); + const parsed = JSON.parse(result.stdout) as Record<string, unknown>; + expect(parsed).toHaveProperty(key); + }); + + And('metadata has a validation object with count fields', () => { + const metadata = parseResponseMetadata(getResult(state).stdout); + expect(metadata.validation).toBeDefined(); + expect(typeof metadata.validation!.danglingReferenceCount).toBe('number'); + expect(typeof metadata.validation!.malformedPatternCount).toBe('number'); + expect(typeof metadata.validation!.unknownStatusCount).toBe('number'); + expect(typeof metadata.validation!.warningCount).toBe('number'); + }); + + And('metadata has a numeric pipelineMs field', () => { + const metadata = parseResponseMetadata(getResult(state).stdout); + expect(metadata.pipelineMs).toBeDefined(); + expect(typeof 
metadata.pipelineMs).toBe('number'); + expect(metadata.pipelineMs!).toBeGreaterThanOrEqual(0); + }); + }); + + RuleScenario('Pipeline timing in metadata', ({ Given, When, Then, And }) => { + Given('TypeScript files with pattern annotations', async () => { + await writePatternFiles(state); + }); + + When('running {string}', async (_ctx: unknown, cmd: string) => { + await runCLICommand(state, cmd); + }); + + Then('exit code is {int}', (_ctx: unknown, code: number) => { + expect(getResult(state).exitCode).toBe(code); + }); + + And('metadata has a numeric pipelineMs field', () => { + const metadata = parseResponseMetadata(getResult(state).stdout); + expect(metadata.pipelineMs).toBeDefined(); + expect(typeof metadata.pipelineMs).toBe('number'); + expect(metadata.pipelineMs!).toBeGreaterThanOrEqual(0); + }); + }); + }); +}); diff --git a/tests/steps/cli/data-api-repl.steps.ts b/tests/steps/cli/data-api-repl.steps.ts new file mode 100644 index 00000000..ad722897 --- /dev/null +++ b/tests/steps/cli/data-api-repl.steps.ts @@ -0,0 +1,191 @@ +/** + * Data API CLI REPL Step Definitions + * + * BDD step definitions for testing the interactive REPL mode: + * multi-query sessions, help output, and pipeline reload. 
+ + * @libar-docs + * @libar-docs-implements DataAPICLIErgonomics + */ + +import { loadFeature, describeFeature } from '@amiceli/vitest-cucumber'; +import { expect } from 'vitest'; +import { + type CLITestState, + type CLIResult, + initState, + writePatternFiles, + createTempDir, +} from '../../support/helpers/process-api-state.js'; +import { runCLI } from '../../support/helpers/cli-runner.js'; + +// ============================================================================= +// Extended State for REPL Tests +// ============================================================================= + +interface ReplTestState extends CLITestState { + replResult: CLIResult | null; +} + +function initReplState(): ReplTestState { + const base = initState(); + return { + ...base, + replResult: null, + }; +} + +function getReplState(state: ReplTestState | null): ReplTestState { + if (!state) throw new Error('REPL test state not initialized'); + return state; +} + +function getTempDir(state: ReplTestState | null): string { + const s = getReplState(state); + if (!s.tempContext) throw new Error('Temp context not initialized'); + return s.tempContext.tempDir; +} + +function getReplResult(state: ReplTestState | null): CLIResult { + const s = getReplState(state); + if (!s.replResult) throw new Error('REPL result not available'); + return s.replResult; +} + +// ============================================================================= +// REPL Runner Helper +// ============================================================================= + +async function runRepl(state: ReplTestState | null, commands: string[]): Promise<void> { + const s = getReplState(state); + const stdinData = commands.join('\n') + '\n'; + s.replResult = await runCLI('process-api', ['-i', "'src/**/*.ts'", 'repl'], { + cwd: getTempDir(state), + timeout: 30000, + stdin: stdinData, + }); +} + +// ============================================================================= +// Module-level state (reset per scenario) +// 
============================================================================= + +let state: ReplTestState | null = null; + +// ============================================================================= +// Feature Definition +// ============================================================================= + +const feature = await loadFeature('tests/features/cli/data-api-repl.feature'); + +describeFeature(feature, ({ Background, Rule, AfterEachScenario }) => { + // --------------------------------------------------------------------------- + // Cleanup + // --------------------------------------------------------------------------- + + AfterEachScenario(async () => { + if (state?.tempContext) { + await state.tempContext.cleanup(); + } + state = null; + }); + + // --------------------------------------------------------------------------- + // Background + // --------------------------------------------------------------------------- + + Background(({ Given }) => { + Given('a temporary working directory', async () => { + state = initReplState(); + state.tempContext = await createTempDir({ prefix: 'cli-repl-test-' }); + }); + }); + + // --------------------------------------------------------------------------- + // Rule: REPL mode accepts multiple queries on a single pipeline load + // --------------------------------------------------------------------------- + + Rule('REPL mode accepts multiple queries on a single pipeline load', ({ RuleScenario }) => { + RuleScenario('REPL accepts multiple queries', ({ Given, When, Then, And }) => { + Given('TypeScript files with pattern annotations', async () => { + await writePatternFiles(state); + }); + + When('piping "status" then "list" then "quit" to the REPL', async () => { + await runRepl(state, ['status', 'list', 'quit']); + }); + + Then('the REPL output contains status JSON', () => { + const result = getReplResult(state); + // status command outputs pretty-printed JSON with success and data fields + 
expect(result.stdout).toContain('"success": true'); + expect(result.stdout).toContain('"counts"'); + }); + + And('the REPL output contains list JSON', () => { + const result = getReplResult(state); + // list command outputs JSON with pattern names + expect(result.stdout).toContain('"RoadmapPattern"'); + }); + + And('the REPL exits cleanly', () => { + const result = getReplResult(state); + // REPL should exit with code 0 after quit + expect(result.exitCode).toBe(0); + }); + }); + + RuleScenario('REPL shows help output', ({ Given, When, Then }) => { + Given('TypeScript files with pattern annotations', async () => { + await writePatternFiles(state); + }); + + When('piping "help" then "quit" to the REPL', async () => { + await runRepl(state, ['help', 'quit']); + }); + + Then('the REPL output contains available commands', () => { + const result = getReplResult(state); + // help goes to stdout + expect(result.stdout).toContain('status'); + expect(result.stdout).toContain('context'); + expect(result.stdout).toContain('dep-tree'); + }); + }); + }); + + // --------------------------------------------------------------------------- + // Rule: REPL reload rebuilds the pipeline from fresh sources + // --------------------------------------------------------------------------- + + Rule('REPL reload rebuilds the pipeline from fresh sources', ({ RuleScenario }) => { + RuleScenario('REPL reloads pipeline on command', ({ Given, When, Then, And }) => { + Given('TypeScript files with pattern annotations', async () => { + await writePatternFiles(state); + }); + + When('piping "status" then "reload" then "status" then "quit" to the REPL', async () => { + await runRepl(state, ['status', 'reload', 'status', 'quit']); + }); + + Then('the REPL stderr contains "Reloading pipeline"', () => { + const result = getReplResult(state); + expect(result.stderr).toContain('Reloading pipeline'); + }); + + And('the REPL stderr contains "Reloaded"', () => { + const result = getReplResult(state); + 
expect(result.stderr).toContain('Reloaded'); + }); + + And('the REPL output contains two status responses', () => { + const result = getReplResult(state); + // Both status commands produce JSON with success:true + // Count occurrences of "success": true — should be at least 2 + const matches = result.stdout.match(/"success": true/g); + expect(matches).not.toBeNull(); + expect(matches!.length).toBeGreaterThanOrEqual(2); + }); + }); + }); +}); diff --git a/tests/support/helpers/cli-runner.ts b/tests/support/helpers/cli-runner.ts index d48849ba..52d398ac 100644 --- a/tests/support/helpers/cli-runner.ts +++ b/tests/support/helpers/cli-runner.ts @@ -37,6 +37,8 @@ export interface CLIOptions { env?: NodeJS.ProcessEnv; /** Timeout in milliseconds (default: 30000) */ timeout?: number; + /** Data to pipe to stdin (closes stdin after writing) */ + stdin?: string; } // ============================================================================= @@ -92,7 +94,12 @@ export async function runCLI( args: string[], options: CLIOptions = {} ): Promise<CLIResult> { - const { cwd = process.cwd(), env = process.env, timeout = DEFAULT_TIMEOUT } = options; + const { + cwd = process.cwd(), + env = process.env, + timeout = DEFAULT_TIMEOUT, + stdin: stdinData, + } = options; const cliPath = getCLIPath(cliName); @@ -103,6 +110,16 @@ shell: true, }); + // Pipe stdin data if provided, then close stdin + if (stdinData !== undefined) { + child.stdin.on('error', (error: NodeJS.ErrnoException) => { + if (error.code !== 'EPIPE') { + reject(error); + } + }); + child.stdin.end(stdinData); + } + let stdout = ''; let stderr = ''; let timedOut = false;