From 10ac223cd1868b7246a219c532a7123216fb48fc Mon Sep 17 00:00:00 2001 From: Osho Emmanuel Date: Wed, 4 Mar 2026 20:31:43 +0100 Subject: [PATCH 1/7] chore: bump version to 2.5.1 and add prepare scripts --- package.json | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/package.json b/package.json index 838f006..9cfa8fc 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "vectorlint", - "version": "2.4.0", + "version": "2.5.1", "description": "An LLM-based prose linter that lets you enforce your style guide in one prompt", "type": "module", "main": "dist/index.js", @@ -12,7 +12,9 @@ "lint:fix": "eslint . --fix", "test": "vitest", "test:run": "vitest run", - "test:ci": "vitest run --coverage" + "test:ci": "vitest run --coverage", + "prepare": "npm run build", + "prepublishOnly": "npm run build" }, "bin": { "vectorlint": "./dist/index.js", @@ -89,4 +91,4 @@ "typescript-eslint": "^8.46.1", "vitest": "^2.0.0" } -} +} \ No newline at end of file From c2d2a94f5e1b98d40a1d17871ca797bf7ace85be Mon Sep 17 00:00:00 2001 From: Osho Emmanuel Date: Wed, 22 Apr 2026 08:20:18 +0100 Subject: [PATCH 2/7] feat(skills): add agentic content review skill - Add a project-level Codex skill for VectorLint-style content review. - Include per-rule reviewer directives, finding templates, and default rules. - Add rule discovery and parser/scorer scripts for grounded findings. 
--- .codex/skills/agentic-content-review/SKILL.md | 62 +++ .../agentic-content-review/agents/openai.yaml | 7 + .../references/finding-template.md | 37 ++ .../references/reviewer-directive.md | 67 +++ .../references/rule-index-format.md | 23 + .../references/subagent-adapters.md | 42 ++ .../rules/default/ai-patterns.md | 7 + .../rules/default/directness.md | 7 + .../rules/default/rule-index.yml | 18 + .../rules/default/unsupported-claims.md | 7 + .../scripts/list-active-rules.ts | 191 ++++++++ .../scripts/parse-review-and-score.ts | 436 ++++++++++++++++++ 12 files changed, 904 insertions(+) create mode 100644 .codex/skills/agentic-content-review/SKILL.md create mode 100644 .codex/skills/agentic-content-review/agents/openai.yaml create mode 100644 .codex/skills/agentic-content-review/references/finding-template.md create mode 100644 .codex/skills/agentic-content-review/references/reviewer-directive.md create mode 100644 .codex/skills/agentic-content-review/references/rule-index-format.md create mode 100644 .codex/skills/agentic-content-review/references/subagent-adapters.md create mode 100644 .codex/skills/agentic-content-review/rules/default/ai-patterns.md create mode 100644 .codex/skills/agentic-content-review/rules/default/directness.md create mode 100644 .codex/skills/agentic-content-review/rules/default/rule-index.yml create mode 100644 .codex/skills/agentic-content-review/rules/default/unsupported-claims.md create mode 100755 .codex/skills/agentic-content-review/scripts/list-active-rules.ts create mode 100755 .codex/skills/agentic-content-review/scripts/parse-review-and-score.ts diff --git a/.codex/skills/agentic-content-review/SKILL.md b/.codex/skills/agentic-content-review/SKILL.md new file mode 100644 index 0000000..6f2f12f --- /dev/null +++ b/.codex/skills/agentic-content-review/SKILL.md @@ -0,0 +1,62 @@ +--- +name: agentic-content-review +description: Agentic VectorLint-style content review using active markdown rules, one-rule-per-subagent delegation, 
parser-clean markdown findings, exact quote grounding, and density scoring. Use when reviewing docs, marketing copy, specs, PR descriptions, implementation artifacts, or other prose/content files against workspace rules in .vlint/rules or skill-bundled default rules. +--- + +# Agentic Content Review + +Use this skill to review prose and other content files against active markdown rules with an agent-native workflow. + +## Review Model + +- Delegate one reviewer subagent per active rule per source file. +- Use path-only delegation: provide source and rule file paths, and let the reviewer read the files directly. +- Have each reviewer read the source file and exactly one rule file itself. +- Optimize the reviewer pass for high recall, then use the main-agent audit to remove unsupported findings. +- Require each reviewer to run parser and scorer checks before returning. + +## Core Definitions + +- `parser-clean` means all required finding fields are present; source and rule files exist; the Evidence quote is found in the source file; the Rule quote is found in the rule file; Context supports violation is `true` or `false`; Confidence is numeric from `0.0` to `1.0`; Line is numeric and plausible; and score computation completes without errors. +- `same-anchor groups` means findings that share source file, rule path, Evidence quote, Rule quote, and Line. Treat them as semantic-review warnings only, never as duplicate decisions. +- Density scoring is strict by default, based on finding count divided by word count, clamped to a `0-10` range. Recompute the final score after main-agent edits. + +## Workflow + +1. Resolve active rules from workspace `.vlint/rules/*/rule-index.yml` files, or from bundled defaults at `.codex/skills/agentic-content-review/rules/default/rule-index.yml` when no workspace indexes exist or the caller explicitly asks for defaults. +2. Pair each source file with every active rule. +3. Dispatch one reviewer subagent for each file-and-rule pair. +4. 
Collect markdown findings with exact Evidence quotes and Rule quotes. +5. Run parser and scorer validation on every reviewer response. +6. Treat same-anchor groups as semantic review warnings, not duplicate decisions. +7. Consolidate into a scored report with findings, warnings, and a short recommendation. + +## Output Rules +- Use the empty-findings form from `references/finding-template.md` when the reviewer finds no issues. +- Keep findings structurally consistent with `references/finding-template.md`. +- Ground every finding in exact Evidence and Rule quotes. +- Keep final claims grounded in exact Evidence and Rule quotes plus the main-agent semantic audit. +- Use `references/rule-index-format.md` to resolve active rules from YAML pack indexes. +- Use `references/reviewer-directive.md` to prompt reviewer subagents. +- Use `references/subagent-adapters.md` to normalize subagent output. + + +## Commands + +List active rules: + +```bash +npx tsx .codex/skills/agentic-content-review/scripts/list-active-rules.ts +``` + +Use bundled defaults even when workspace rules exist: + +```bash +npx tsx .codex/skills/agentic-content-review/scripts/list-active-rules.ts --include-defaults +``` + +Parse and score review markdown: + +```bash +npx tsx .codex/skills/agentic-content-review/scripts/parse-review-and-score.ts +``` diff --git a/.codex/skills/agentic-content-review/agents/openai.yaml b/.codex/skills/agentic-content-review/agents/openai.yaml new file mode 100644 index 0000000..33ac03e --- /dev/null +++ b/.codex/skills/agentic-content-review/agents/openai.yaml @@ -0,0 +1,7 @@ +interface: + display_name: "Agentic Content Review" + short_description: "Per-rule agent review with scoring" + default_prompt: "Use $agentic-content-review to review this content with active .vlint rules and return a scored report." 
+ +policy: + allow_implicit_invocation: true diff --git a/.codex/skills/agentic-content-review/references/finding-template.md b/.codex/skills/agentic-content-review/references/finding-template.md new file mode 100644 index 0000000..ee62736 --- /dev/null +++ b/.codex/skills/agentic-content-review/references/finding-template.md @@ -0,0 +1,37 @@ +# Finding Template + +Use this exact field set for each markdown finding. Replace placeholder text with actual values. + +```md +### Finding +- Rule path: `relative/path/to/rule.md` +- Source file: `relative/or/absolute/path.md` +- Line: `source line number where the evidence quote begins` +- Evidence quote: `exact source text supporting the finding` +- Rule quote: `exact text copied from the rule file` +- Flag reasoning: `why the evidence, rule, context, and plausible non-violation make this worth flagging` +- Issue: `one sentence describing the problem` +- Plausible non-violation: `one sentence describing the best benign interpretation` +- Context supports violation: `true or false` +- Suggestion: `one short sentence describing how to fix the issue` +- Confidence: `number from 0.0 to 1.0 reflecting certainty this is a genuine violation` +``` + +## Field Meanings + +- `Flag reasoning`: concise explanation of why the finding should be surfaced after considering the evidence quote, rule quote, surrounding context, and plausible non-violation. +- `Plausible non-violation`: the strongest benign interpretation to consider after identifying the issue. +- `Context supports violation`: `true` when the surrounding source/workspace context strengthens the violation claim after considering the plausible non-violation; `false` when the context weakens or undermines the claim. +- `Suggestion`: one short sentence describing how to fix the issue. This maps to VectorLint's `suggestion` field. 
+- `Confidence`: number from `0.0` to `1.0` reflecting certainty that this is a genuine violation after considering rule support, evidence, context, and plausible non-violation. + +## Empty Findings + +If a reviewer has no findings, return: + +```md +## Findings +No findings. +``` + +This empty-findings form is the only accepted response shape that does not contain `### Finding` blocks. diff --git a/.codex/skills/agentic-content-review/references/reviewer-directive.md b/.codex/skills/agentic-content-review/references/reviewer-directive.md new file mode 100644 index 0000000..0f8edab --- /dev/null +++ b/.codex/skills/agentic-content-review/references/reviewer-directive.md @@ -0,0 +1,67 @@ +# Reviewer Directive + +Use this directive for each reviewer subagent. + +## Role + +You are a VectorLint-style content reviewer assigned to one active rule and one source file. + +## VectorLint Directive + +Your goal is to flag every instance that matches the assigned rule. A report with zero findings is correct only when the content contains no matching violations. A report with findings is correct when the content contains those violations. Accuracy means flagging what is present and only what is present. + +The assigned rule is the only criterion for flagging findings. Flag every instance that matches the rule's pattern. Leave out text that does not match the assigned rule. Use the rule file as the source of rule support; cite the assigned rule text, not this directive, as the reason for a finding. + +Interpret source structure from the file type and content structure. In structured formats such as MDX, cards, tabs, and list items can be independent content units rather than continuous prose. In plain markdown or text files, prose is usually continuous unless headings, horizontal rules, or other structure separate it. 
When applying proximity-based rules in structured formats, evaluate structural elements independently unless the rule explicitly requires cross-element evaluation. + +Each finding has two jobs: + +1. Identify a candidate violation. +2. Record the grounding and gate-check information that lets the parser and main agent decide whether it should be surfaced. + +For each candidate, make these checks visible through the template fields: + +- Rule support: the assigned rule quote supports the finding. +- Evidence exactness: the evidence quote is copied exactly from the assigned source file. +- Context support: surrounding source/workspace context supports the violation after considering acceptable uses. +- Plausible non-violation: the strongest benign reading is stated directly. +- Suggestion: one short sentence describes how to fix the issue. + +## Inputs + +- Read the source file from its path. +- Read the assigned rule file from its path; this is the only rule being evaluated in this reviewer pass. +- Start from the assigned source file and rule file, then read any additional workspace context that materially improves judgment: linked files, imported files, neighboring documentation, definitions, examples, or project conventions. +- Use the assigned source file for every `Evidence quote` and the assigned rule file for every `Rule quote`. + +## Task + +- Inspect the full source file with high recall, using additional context to distinguish genuine violations from acceptable uses. +- Find every rule-matching issue you can justify from the text. +- Ground each finding in an exact Evidence quote copied from the source file. +- Ground each finding in an exact Rule quote copied from the rule file. +- Use the source file's actual line numbers. The `Line` value is where the evidence quote begins. +- Ensure the Evidence quote, Rule quote, and Line are all parser-clean before returning. +- Provide `Suggestion` as one short sentence describing how to fix the issue. 
+- Return parser-safe markdown rather than freeform commentary. +- Run the parser/scorer checks before returning. + +## Confidence Calibration + +Assign `Confidence` after every other finding field is written. + +- `0.75-1.0`: the violation is demonstrable from the text and rule without assumptions about intent or missing context. +- `0.50-0.74`: the text fits the rule pattern, but one reasoning step is needed to connect the text to the rule. +- `0.25-0.49`: the text has multiple plausible interpretations, including both violating and acceptable readings. +- `0.00-0.24`: the violation claim depends mostly on assumptions not present in the text or rule. + +## Return Format + +- Return findings using the exact fields from `finding-template.md`. +- Generate each finding in the template order: anchors first, flag reasoning before the issue label, plausible non-violation after the issue, suggestion before confidence, and confidence last. +- If no findings exist, return the empty findings form from the template. +- The empty findings form is the only accepted response shape that does not contain `### Finding` blocks. +- Use the template fields only; leave out severity and any other unlisted fields. +- Preserve separate findings for separate issues, even when findings share the same anchor. +- Keep the output parser-clean before returning. +- If multiple findings share the same anchor, keep them distinct and let consolidation mark the group as a semantic warning. diff --git a/.codex/skills/agentic-content-review/references/rule-index-format.md b/.codex/skills/agentic-content-review/references/rule-index-format.md new file mode 100644 index 0000000..b1dd001 --- /dev/null +++ b/.codex/skills/agentic-content-review/references/rule-index-format.md @@ -0,0 +1,23 @@ +# Rule Index Format + +Use YAML rule indexes stored at `.vlint/rules/*/rule-index.yml`. +Bundled defaults live at `.codex/skills/agentic-content-review/rules/default/rule-index.yml`. 
+ +## Shape + +```yaml +pack: brand +active: true +rules: + - id: voice + name: Brand Voice + path: voice.md + active: true + description: Check whether content matches brand tone and voice. +``` + +## Selection Rules + +- Dispatch reviewers only for rules marked `active: true`. +- Resolve the rule file relative to the pack directory before creating the subagent prompt. +- Use bundled defaults only when no workspace `.vlint/rules/*/rule-index.yml` exists or the caller explicitly asks for defaults. diff --git a/.codex/skills/agentic-content-review/references/subagent-adapters.md b/.codex/skills/agentic-content-review/references/subagent-adapters.md new file mode 100644 index 0000000..0dd7b3f --- /dev/null +++ b/.codex/skills/agentic-content-review/references/subagent-adapters.md @@ -0,0 +1,42 @@ +# Subagent Adapters + +Use an adapter layer to normalize reviewer prompts and responses. + +## Prompt Contract + +Provide each reviewer with: + +- `source_file` +- `rule_id` +- `rule_file` +- any minimal scope hint from the rule index + +Use path-only delegation: provide file paths rather than copied content blocks, partial excerpts, or summaries of the source text. + +## Dispatch Rules + +- Create one subagent per active rule per source file. +- Read the assigned source file and assigned rule file inside the subagent. +- Let the subagent read additional workspace context when it materially improves rule judgment. +- Keep the reviewer prompt short and specific. +- Create a fresh subagent for each file-and-rule pair. + +## Core Checks + +- `parser-clean` means required fields are present; source and rule files exist; the Evidence quote is found in the source file; the Rule quote is found in the rule file; Context supports violation is `true` or `false`; Confidence is numeric from `0.0` to `1.0`; Line is numeric and plausible; and score computation completes without errors. 
+- `same-anchor groups` means findings sharing source file, rule path, Evidence quote, Rule quote, and Line. Treat them as semantic-review warnings only. +- Density scoring is strict by default, based on finding count divided by word count, clamped to `0-10`. Recompute the final score after main-agent edits. + +## Response Normalization + +- Parse the reviewer output against `finding-template.md`. +- Accept only output that matches `finding-template.md`; route severity fields or unsupported sections back through parser-clean repair. +- Re-run once if the output is not parser-clean. +- Preserve same-anchor groups as semantic review warnings during consolidation. +- Aggregate findings into a scored report after normalization. + +## Host Behavior + +- Treat reviewer output as structured review data, not prose. +- Keep the consolidation layer responsible for semantic review of same-anchor groups and scoring density. +- Keep the reviewer responsible for recall and rule-specific judgment. diff --git a/.codex/skills/agentic-content-review/rules/default/ai-patterns.md b/.codex/skills/agentic-content-review/rules/default/ai-patterns.md new file mode 100644 index 0000000..3c690e7 --- /dev/null +++ b/.codex/skills/agentic-content-review/rules/default/ai-patterns.md @@ -0,0 +1,7 @@ +# AI Patterns + +Flag generic AI-like transition phrases that add no content, including "in today's fast-paced world", "delve into", "unlock the power of", "it is worth noting", and "seamlessly" when used as empty praise. + +Flag broad, polished-but-empty statements that could apply to almost any product or topic. + +Treat common phrases as acceptable when they are specific and supported. Flag them only when the surrounding sentence is generic, unsupported, or non-specific. 
diff --git a/.codex/skills/agentic-content-review/rules/default/directness.md b/.codex/skills/agentic-content-review/rules/default/directness.md new file mode 100644 index 0000000..08bf402 --- /dev/null +++ b/.codex/skills/agentic-content-review/rules/default/directness.md @@ -0,0 +1,7 @@ +# Directness + +Flag filler phrases that do not add meaning, including "it is important to note", "it should be noted", "in order to", and similar setup phrases. + +Flag hedging that weakens a concrete claim without adding accuracy, including "may help", "can potentially", and "might be able to" when the surrounding context supports a direct statement. + +Treat cautious wording as acceptable when it is necessary for technical accuracy, uncertainty, legal caution, or honest limitation. diff --git a/.codex/skills/agentic-content-review/rules/default/rule-index.yml b/.codex/skills/agentic-content-review/rules/default/rule-index.yml new file mode 100644 index 0000000..6bc883b --- /dev/null +++ b/.codex/skills/agentic-content-review/rules/default/rule-index.yml @@ -0,0 +1,18 @@ +pack: default +active: true +rules: + - id: directness + name: Directness + path: directness.md + active: true + description: Flag filler phrases, hedging, and indirect phrasing that weakens clarity. + - id: unsupported-claims + name: Unsupported Claims + path: unsupported-claims.md + active: true + description: Flag claims that need evidence, qualification, or source support. + - id: ai-patterns + name: AI Patterns + path: ai-patterns.md + active: true + description: Flag generic AI-like phrasing and empty transitional language. 
diff --git a/.codex/skills/agentic-content-review/rules/default/unsupported-claims.md b/.codex/skills/agentic-content-review/rules/default/unsupported-claims.md new file mode 100644 index 0000000..98a0526 --- /dev/null +++ b/.codex/skills/agentic-content-review/rules/default/unsupported-claims.md @@ -0,0 +1,7 @@ +# Unsupported Claims + +Flag claims that present superiority, certainty, performance, safety, or business impact without evidence in the surrounding content. + +Flag phrases such as "the best", "the fastest", "guaranteed", "proven", and "trusted by teams" when the content does not include support, qualification, or a source. + +Treat clearly scoped opinion, quoted customer language, and claims immediately supported by evidence in the same section as acceptable. diff --git a/.codex/skills/agentic-content-review/scripts/list-active-rules.ts b/.codex/skills/agentic-content-review/scripts/list-active-rules.ts new file mode 100755 index 0000000..b92e97d --- /dev/null +++ b/.codex/skills/agentic-content-review/scripts/list-active-rules.ts @@ -0,0 +1,191 @@ +#!/usr/bin/env tsx +import { existsSync, readdirSync, readFileSync, statSync } from "node:fs"; +import path from "node:path"; +import process from "node:process"; + +type RawRule = { + id?: string; + name?: string; + path?: string; + active?: boolean; + description?: string; +}; + +type RuleIndex = { + pack?: string; + active?: boolean; + rules: RawRule[]; +}; + +type ActiveRule = { + pack: string; + id: string; + name: string; + description: string; + rulePath: string; + indexPath: string; +}; + +type Output = { + cwd: string; + rules: ActiveRule[]; + errors: string[]; +}; + +const SKILL_ROOT = path.resolve(path.dirname(new URL(import.meta.url).pathname), ".."); +const DEFAULT_INDEX = path.join(SKILL_ROOT, "rules", "default", "rule-index.yml"); + +function parseBoolean(value: string): boolean | undefined { + const normalized = value.trim().toLowerCase(); + if (normalized === "true") return true; + if 
(normalized === "false") return false; + return undefined; +} + +function unquote(value: string): string { + const trimmed = value.trim(); + if ( + (trimmed.startsWith('"') && trimmed.endsWith('"')) || + (trimmed.startsWith("'") && trimmed.endsWith("'")) + ) { + return trimmed.slice(1, -1); + } + return trimmed; +} + +function parseKeyValue(line: string): { key: string; value: string } | undefined { + const match = line.match(/^([A-Za-z0-9_-]+):\s*(.*)$/); + if (!match) return undefined; + return { key: match[1]!, value: unquote(match[2] ?? "") }; +} + +function assignValue(target: Record, key: string, value: string): void { + if (key === "active") { + const parsed = parseBoolean(value); + target[key] = parsed ?? value; + return; + } + target[key] = value; +} + +function parseRuleIndexYaml(content: string): RuleIndex { + const result: RuleIndex = { rules: [] }; + let inRules = false; + let currentRule: RawRule | undefined; + + for (const rawLine of content.split(/\r?\n/)) { + const withoutComment = rawLine.replace(/\s+#.*$/, ""); + if (withoutComment.trim() === "") continue; + + const indent = withoutComment.match(/^\s*/)?.[0].length ?? 
0; + const trimmed = withoutComment.trim(); + + if (indent === 0 && trimmed === "rules:") { + inRules = true; + continue; + } + + if (!inRules && indent === 0) { + const parsed = parseKeyValue(trimmed); + if (parsed) assignValue(result as unknown as Record, parsed.key, parsed.value); + continue; + } + + if (inRules) { + if (trimmed.startsWith("- ")) { + currentRule = {}; + result.rules.push(currentRule); + const rest = trimmed.slice(2).trim(); + if (rest) { + const parsed = parseKeyValue(rest); + if (parsed) assignValue(currentRule as Record, parsed.key, parsed.value); + } + continue; + } + + if (currentRule) { + const parsed = parseKeyValue(trimmed); + if (parsed) assignValue(currentRule as Record, parsed.key, parsed.value); + } + } + } + + return result; +} + +function listWorkspaceIndexes(root: string): string[] { + const rulesRoot = path.join(root, ".vlint", "rules"); + if (!existsSync(rulesRoot)) return []; + + return readdirSync(rulesRoot) + .map((entry) => path.join(rulesRoot, entry)) + .filter((entryPath) => statSync(entryPath).isDirectory()) + .map((packDir) => path.join(packDir, "rule-index.yml")) + .filter((indexPath) => existsSync(indexPath)); +} + +function readActiveRules(indexPath: string): ActiveRule[] { + const parsed = parseRuleIndexYaml(readFileSync(indexPath, "utf8")); + if (parsed.active === false) return []; + + const pack = parsed.pack || path.basename(path.dirname(indexPath)); + const activeRules: ActiveRule[] = []; + + for (const rule of parsed.rules) { + if (rule.active === false) continue; + if (!rule.id || !rule.name || !rule.path) { + throw new Error(`Invalid rule entry in ${indexPath}: id, name, and path are required`); + } + + const rulePath = path.resolve(path.dirname(indexPath), rule.path); + if (!existsSync(rulePath)) { + throw new Error(`Missing rule file for ${pack}.${rule.id}: ${rulePath}`); + } + + activeRules.push({ + pack, + id: rule.id, + name: rule.name, + description: rule.description || "", + rulePath, + indexPath: 
path.resolve(indexPath), + }); + } + + return activeRules; +} + +function parseArgs(args: string[]): { cwd: string; includeDefaults: boolean } { + const includeDefaults = args.includes("--include-defaults"); + const positional = args.filter((arg) => arg !== "--include-defaults"); + return { + cwd: positional[0] ? path.resolve(positional[0]) : process.cwd(), + includeDefaults, + }; +} + +function main(): void { + const { cwd, includeDefaults } = parseArgs(process.argv.slice(2)); + const errors: string[] = []; + const workspaceIndexes = listWorkspaceIndexes(cwd); + const indexes = [...workspaceIndexes]; + + if ((includeDefaults || workspaceIndexes.length === 0) && existsSync(DEFAULT_INDEX)) { + indexes.push(DEFAULT_INDEX); + } + + const rules: ActiveRule[] = []; + for (const indexPath of indexes) { + try { + rules.push(...readActiveRules(indexPath)); + } catch (error) { + errors.push(error instanceof Error ? error.message : String(error)); + } + } + + const output: Output = { cwd, rules, errors }; + console.log(JSON.stringify(output, null, 2)); + process.exitCode = errors.length > 0 ? 
1 : 0; +} + +main(); diff --git a/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts b/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts new file mode 100755 index 0000000..f5830cd --- /dev/null +++ b/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts @@ -0,0 +1,436 @@ +#!/usr/bin/env tsx +import { existsSync, readFileSync, realpathSync, statSync } from "node:fs"; +import path from "node:path"; +import process from "node:process"; + +type Finding = { + index: number; + rulePath: string; + sourceFile: string; + line: number; + evidenceQuote: string; + ruleQuote: string; + flagReasoning: string; + issue: string; + plausibleNonViolation: string; + contextSupportsViolation: boolean; + suggestion: string; + confidence: number; +}; + +type Warning = { + type: string; + message: string; + findingIndexes?: number[]; +}; + +type Output = { + valid: boolean; + findingCount: number; + wordCount: number; + score: number; + findings: Finding[]; + warnings: Warning[]; + errors: string[]; +}; + +const REQUIRED_FIELDS = [ + "Rule path", + "Source file", + "Line", + "Evidence quote", + "Rule quote", + "Flag reasoning", + "Issue", + "Plausible non-violation", + "Context supports violation", + "Suggestion", + "Confidence", +] as const; + +type FieldName = (typeof REQUIRED_FIELDS)[number]; + +type RawFinding = Record; + +function stripWrapper(value: string): string { + const trimmed = value.trim(); + if ( + (trimmed.startsWith("`") && trimmed.endsWith("`")) || + (trimmed.startsWith('"') && trimmed.endsWith('"')) + ) { + return trimmed.slice(1, -1); + } + return trimmed; +} + +function normalizeAnchor(value: string): string { + return value.trim().replace(/\s+/g, " "); +} + +function countWords(content: string): number { + const trimmed = content.trim(); + return trimmed === "" ? 
0 : trimmed.split(/\s+/).length; +} + +function parseFieldLine(line: string): { key: FieldName; value: string } | undefined { + const match = line.match(/^\s*(?:[-*]\s*)?([^:]+):\s*(.*)$/); + if (!match) return undefined; + + const rawKey = match[1]!.trim(); + const key = REQUIRED_FIELDS.find((field) => field === rawKey); + if (!key) return undefined; + + return { key, value: stripWrapper(match[2] ?? "") }; +} + +function parseBoolean(value: string): boolean | undefined { + const normalized = value.trim().toLowerCase(); + if (normalized === "true") return true; + if (normalized === "false") return false; + return undefined; +} + +function isNoFindings(markdown: string): boolean { + return /^## Findings\s*\r?\n\s*No findings\.\s*$/.test(markdown.trim()); +} + +function isWithinRoot(filePath: string, root: string): boolean { + const relative = path.relative(root, filePath); + return relative === "" || (!relative.startsWith("..") && !path.isAbsolute(relative)); +} + +function resolveReadableFile( + filePath: string, + allowedRoots: string[], + label: string, + errors: string[], + baseDir: string +): string | undefined { + const candidates = path.isAbsolute(filePath) + ? 
[filePath] + : [path.resolve(baseDir, filePath), path.resolve(process.cwd(), filePath)]; + const resolved = candidates.find((candidate) => existsSync(candidate)); + if (!resolved) { + errors.push(`${label} not found: ${candidates.join(" or ")}`); + return undefined; + } + + let realPath: string; + try { + realPath = realpathSync(resolved); + } catch (error) { + errors.push(`${label} could not be resolved: ${resolved}`); + return undefined; + } + + if (!statSync(realPath).isFile()) { + errors.push(`${label} is not a file: ${realPath}`); + return undefined; + } + + const allowedRealRoots = allowedRoots + .filter((root) => existsSync(root)) + .map((root) => realpathSync(root)); + if (!allowedRealRoots.some((root) => isWithinRoot(realPath, root))) { + errors.push(`${label} is outside allowed roots: ${realPath}`); + return undefined; + } + + return realPath; +} + +function parseRawFindings(markdown: string, errors: string[]): RawFinding[] { + const normalized = markdown.trim(); + const withoutOptionalHeading = normalized.replace(/^## Findings\s*\r?\n/, "").trim(); + const firstFindingIndex = withoutOptionalHeading.search(/^### Finding\s*$/m); + + if (firstFindingIndex === -1) { + errors.push("No finding blocks found. 
Use `## Findings` followed by `No findings.` for an empty review."); + return []; + } + + if (withoutOptionalHeading.slice(0, firstFindingIndex).trim() !== "") { + errors.push("Unsupported content found before the first finding block."); + } + + const blocks = withoutOptionalHeading + .slice(firstFindingIndex) + .split(/(?=^### Finding\s*$)/m) + .filter((block) => block.trim() !== ""); + + return blocks.map((block, blockIndex) => { + const fields = {} as RawFinding; + const seen = new Set(); + let expectedFieldIndex = 0; + + for (const line of block.split(/\r?\n/)) { + const trimmed = line.trim(); + if (trimmed === "" || trimmed === "### Finding") continue; + + const parsed = parseFieldLine(line); + if (!parsed) { + const unknownField = trimmed.match(/^\s*(?:[-*]\s*)?([^:]+):/); + if (unknownField) { + errors.push(`Finding ${blockIndex + 1} has unsupported field: ${unknownField[1]!.trim()}`); + } else if (trimmed.startsWith("#")) { + errors.push(`Finding ${blockIndex + 1} has unsupported section: ${trimmed}`); + } else { + errors.push(`Finding ${blockIndex + 1} has unsupported content: ${trimmed}`); + } + continue; + } + + if (seen.has(parsed.key)) { + errors.push(`Finding ${blockIndex + 1} repeats field: ${parsed.key}`); + } + const actualFieldIndex = REQUIRED_FIELDS.indexOf(parsed.key); + if (actualFieldIndex !== expectedFieldIndex) { + errors.push( + `Finding ${blockIndex + 1} field order mismatch: expected ${REQUIRED_FIELDS[expectedFieldIndex]}, found ${parsed.key}.` + ); + } + seen.add(parsed.key); + fields[parsed.key] = parsed.value; + expectedFieldIndex += 1; + } + + for (const field of REQUIRED_FIELDS) { + if (fields[field] === undefined) { + errors.push(`Finding ${blockIndex + 1} is missing required field: ${field}`); + } + } + + return fields; + }); +} + +function lineCount(content: string): number { + return content.split(/\r?\n/).length; +} + +function lineContainsEvidence(content: string, line: number, evidence: string): boolean { + const lines = 
content.split(/\r?\n/); + return (lines[line - 1] ?? "").includes(evidence); +} + +function validateFinding( + raw: RawFinding, + index: number, + warnings: Warning[], + errors: string[], + allowedRoots: string[], + baseDir: string +): Finding | undefined { + const sourceFileInput = raw["Source file"] || ""; + const rulePathInput = raw["Rule path"] || ""; + const lineRaw = raw.Line || ""; + const confidenceRaw = raw.Confidence || ""; + const evidenceQuote = raw["Evidence quote"] || ""; + const ruleQuote = raw["Rule quote"] || ""; + const contextSupportsViolationRaw = raw["Context supports violation"] || ""; + const flagReasoning = raw["Flag reasoning"] || ""; + + let sourceContent = ""; + let ruleContent = ""; + + const sourceFile = resolveReadableFile( + sourceFileInput, + allowedRoots, + `Finding ${index + 1}: source file`, + errors, + baseDir + ); + if (sourceFile) sourceContent = readFileSync(sourceFile, "utf8"); + + const rulePath = resolveReadableFile( + rulePathInput, + allowedRoots, + `Finding ${index + 1}: rule path`, + errors, + baseDir + ); + if (rulePath) ruleContent = readFileSync(rulePath, "utf8"); + + if (evidenceQuote.trim() === "") { + errors.push(`Finding ${index + 1}: Evidence quote must not be empty.`); + } + + if (ruleQuote.trim() === "") { + errors.push(`Finding ${index + 1}: Rule quote must not be empty.`); + } + + const line = Number(lineRaw); + if (!Number.isInteger(line) || line <= 0) { + errors.push(`Finding ${index + 1}: Line must be a positive integer.`); + } else if (sourceContent && line > lineCount(sourceContent)) { + errors.push(`Finding ${index + 1}: Line ${line} exceeds source line count.`); + } + + const confidence = Number(confidenceRaw); + if (!Number.isFinite(confidence) || confidence < 0 || confidence > 1) { + errors.push(`Finding ${index + 1}: Confidence must be a number between 0 and 1.`); + } + + const contextSupportsViolation = parseBoolean(contextSupportsViolationRaw); + if (contextSupportsViolation === undefined) { + 
errors.push(`Finding ${index + 1}: Context supports violation must be true or false.`); + } + + if (flagReasoning.trim() === "") { + errors.push(`Finding ${index + 1}: Flag reasoning must not be empty.`); + } + + if (sourceContent && evidenceQuote.trim() !== "" && !sourceContent.includes(evidenceQuote)) { + errors.push(`Finding ${index + 1}: Evidence quote was not found in source file.`); + } + + if (ruleContent && ruleQuote.trim() !== "" && !ruleContent.includes(ruleQuote)) { + errors.push(`Finding ${index + 1}: Rule quote was not found in rule file.`); + } + + if ( + sourceContent && + evidenceQuote.trim() !== "" && + sourceContent.includes(evidenceQuote) && + Number.isInteger(line) && + line > 0 && + line <= lineCount(sourceContent) + ) { + if (!lineContainsEvidence(sourceContent, line, evidenceQuote)) { + warnings.push({ + type: "evidence_quote_line_mismatch", + message: `Finding ${index + 1}: Evidence quote appears in the source file, but not on the reported line.`, + findingIndexes: [index], + }); + } + } + + if ( + !sourceFile || + !rulePath || + !Number.isInteger(line) || + line <= 0 || + !Number.isFinite(confidence) || + confidence < 0 || + confidence > 1 || + contextSupportsViolation === undefined + ) { + return undefined; + } + + return { + index, + rulePath, + sourceFile, + line, + evidenceQuote, + ruleQuote, + flagReasoning, + issue: raw.Issue || "", + plausibleNonViolation: raw["Plausible non-violation"] || "", + contextSupportsViolation, + suggestion: raw.Suggestion || "", + confidence, + }; +} + +function addSameAnchorWarnings(findings: Finding[], warnings: Warning[]): void { + const groups = new Map(); + + findings.forEach((finding, index) => { + const key = [ + finding.sourceFile, + finding.rulePath, + normalizeAnchor(finding.evidenceQuote), + normalizeAnchor(finding.ruleQuote), + String(finding.line), + ].join("\u0000"); + const group = groups.get(key) ?? 
[]; + group.push(index); + groups.set(key, group); + }); + + for (const findingIndexes of groups.values()) { + if (findingIndexes.length > 1) { + warnings.push({ + type: "same_anchor_semantic_review_needed", + message: "Multiple findings share source/rule/evidence anchors; main agent must review semantic overlap.", + findingIndexes, + }); + } + } +} + +function computeWordCount(findings: Finding[]): number { + const sourceFiles = new Set(findings.map((finding) => finding.sourceFile)); + let total = 0; + for (const sourceFile of sourceFiles) { + total += countWords(readFileSync(sourceFile, "utf8")); + } + return total; +} + +function computeScore(findingCount: number, wordCount: number): number { + const wordCountForScore = wordCount || 1; + const density = (findingCount / wordCountForScore) * 100; + const rawScore = Math.max(0, Math.min(100, 100 - density * 10)); + return Number((rawScore / 10).toFixed(1)); +} + +function main(): void { + const reviewPath = process.argv[2]; + const errors: string[] = []; + const warnings: Warning[] = []; + + if (!reviewPath) { + errors.push("Usage: parse-review-and-score.ts "); + const output: Output = { valid: false, findingCount: 0, wordCount: 0, score: 0, findings: [], warnings, errors }; + console.log(JSON.stringify(output, null, 2)); + process.exitCode = 1; + return; + } + + const resolvedReviewPath = path.resolve(reviewPath); + if (!existsSync(resolvedReviewPath)) { + errors.push(`Review file not found: ${resolvedReviewPath}`); + const output: Output = { valid: false, findingCount: 0, wordCount: 0, score: 0, findings: [], warnings, errors }; + console.log(JSON.stringify(output, null, 2)); + process.exitCode = 1; + return; + } + + const reviewDir = path.dirname(realpathSync(resolvedReviewPath)); + const skillRoot = path.resolve(path.dirname(new URL(import.meta.url).pathname), ".."); + const allowedRoots = [process.cwd(), reviewDir, path.dirname(reviewDir), skillRoot]; + + const markdown = readFileSync(resolvedReviewPath, 
"utf8"); + if (isNoFindings(markdown)) { + const output: Output = { valid: true, findingCount: 0, wordCount: 0, score: 10, findings: [], warnings, errors }; + console.log(JSON.stringify(output, null, 2)); + return; + } + + const rawFindings = parseRawFindings(markdown, errors); + const findings = rawFindings + .map((raw, index) => validateFinding(raw, index, warnings, errors, allowedRoots, reviewDir)) + .filter((finding): finding is Finding => finding !== undefined); + + addSameAnchorWarnings(findings, warnings); + + const wordCount = computeWordCount(findings); + const findingCount = rawFindings.length; + const output: Output = { + valid: errors.length === 0, + findingCount, + wordCount, + score: computeScore(findingCount, wordCount), + findings, + warnings, + errors, + }; + + console.log(JSON.stringify(output, null, 2)); + process.exitCode = output.valid ? 0 : 1; +} + +main(); From b1aa12d7891aa3330b3b7ce8a13c6343214f8e8a Mon Sep 17 00:00:00 2001 From: Osho Emmanuel Date: Wed, 22 Apr 2026 10:58:28 +0100 Subject: [PATCH 3/7] feat(skill): add resolveRuleName and buildSessionLog to parse-review-and-score - Export resolveRuleName: reads rule-index.yml, returns human-readable name matched by path; falls back to basename - Add SessionFinding and SessionLog exported types - Export buildSessionLog: maps Finding[] to SessionLog with ruleName resolved per finding - Export writeSessionLog: creates sessionsDir, writes colon-safe timestamped JSON log - Add parseWriteLogFlag helper for --write-log flag detection --- .../scripts/parse-review-and-score.ts | 115 ++++++++- .../parse-review-and-score.test.ts | 241 ++++++++++++++++++ 2 files changed, 355 insertions(+), 1 deletion(-) create mode 100644 tests/evaluations/parse-review-and-score.test.ts diff --git a/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts b/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts index f5830cd..55137eb 100755 --- 
a/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts +++ b/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts @@ -1,5 +1,5 @@ #!/usr/bin/env tsx -import { existsSync, readFileSync, realpathSync, statSync } from "node:fs"; +import { existsSync, mkdirSync, readFileSync, realpathSync, statSync, writeFileSync } from "node:fs"; import path from "node:path"; import process from "node:process"; @@ -34,6 +34,30 @@ type Output = { errors: string[]; }; +type SessionFinding = { + ruleName: string; + rulePath: string; + sourceFile: string; + line: number; + evidenceQuote: string; + ruleQuote: string; + flagReasoning: string; + issue: string; + plausibleNonViolation: string; + contextSupportsViolation: boolean; + suggestion: string; + confidence: number; +}; + +export type SessionLog = { + sessionId: string; + sourceFile: string; + wordCount: number; + findingCount: number; + score: number; + findings: SessionFinding[]; +}; + const REQUIRED_FIELDS = [ "Rule path", "Source file", @@ -90,6 +114,87 @@ function parseBoolean(value: string): boolean | undefined { return undefined; } +export function resolveRuleName(rulePath: string): string { + const ruleDir = path.dirname(rulePath); + const indexPath = path.join(ruleDir, "rule-index.yml"); + const baseName = path.basename(rulePath, path.extname(rulePath)); + + if (!existsSync(indexPath)) return baseName; + + let content: string; + try { + content = readFileSync(indexPath, "utf8"); + } catch { + return baseName; + } + + const ruleBaseName = path.basename(rulePath); + const lines = content.split(/\r?\n/); + let currentRulePath: string | undefined; + let currentRuleName: string | undefined; + + for (const rawLine of lines) { + const withoutComment = rawLine.replace(/\s+#.*$/, "").trim(); + if (withoutComment.startsWith("- ")) { + currentRulePath = undefined; + currentRuleName = undefined; + } + const nameMatch = withoutComment.match(/^name:\s*(.+)$/); + if (nameMatch) currentRuleName = 
nameMatch[1]!.replace(/^['"]|['"]$/g, "").trim(); + const pathMatch = withoutComment.match(/^path:\s*(.+)$/); + if (pathMatch) currentRulePath = pathMatch[1]!.replace(/^['"]|['"]$/g, "").trim(); + + if (currentRulePath === ruleBaseName && currentRuleName) { + return currentRuleName; + } + } + + return baseName; +} + +export function buildSessionLog( + findings: Finding[], + wordCount: number, + findingCount: number, + score: number, + timestamp: string +): SessionLog { + const sourceFile = findings[0]?.sourceFile ?? ""; + return { + sessionId: timestamp, + sourceFile, + wordCount, + findingCount, + score, + findings: findings.map((f) => ({ + ruleName: resolveRuleName(f.rulePath), + rulePath: f.rulePath, + sourceFile: f.sourceFile, + line: f.line, + evidenceQuote: f.evidenceQuote, + ruleQuote: f.ruleQuote, + flagReasoning: f.flagReasoning, + issue: f.issue, + plausibleNonViolation: f.plausibleNonViolation, + contextSupportsViolation: f.contextSupportsViolation, + suggestion: f.suggestion, + confidence: f.confidence, + })), + }; +} + +export function writeSessionLog(log: SessionLog, sessionsDir: string): void { + mkdirSync(sessionsDir, { recursive: true }); + const timestampPart = log.sessionId.replace(/:/g, "-"); + const sourcePart = path.basename(log.sourceFile, path.extname(log.sourceFile)) || "unknown"; + const fileName = `${timestampPart}-${sourcePart}.json`; + writeFileSync(path.join(sessionsDir, fileName), JSON.stringify(log, null, 2)); +} + +function parseWriteLogFlag(args: string[]): boolean { + return args.includes("--write-log"); +} + function isNoFindings(markdown: string): boolean { return /^## Findings\s*\r?\n\s*No findings\.\s*$/.test(markdown.trim()); } @@ -429,6 +534,14 @@ function main(): void { errors, }; + const writeLog = parseWriteLogFlag(process.argv.slice(2)); + if (writeLog) { + const sessionsDir = path.join(process.cwd(), ".vlint", "sessions"); + const timestamp = new Date().toISOString(); + const sessionLog = buildSessionLog(findings, 
wordCount, findingCount, output.score, timestamp); + writeSessionLog(sessionLog, sessionsDir); + } + console.log(JSON.stringify(output, null, 2)); process.exitCode = output.valid ? 0 : 1; } diff --git a/tests/evaluations/parse-review-and-score.test.ts b/tests/evaluations/parse-review-and-score.test.ts new file mode 100644 index 0000000..842ad26 --- /dev/null +++ b/tests/evaluations/parse-review-and-score.test.ts @@ -0,0 +1,241 @@ +import { describe, it, expect, beforeEach, afterEach } from "vitest"; +import { mkdirSync, writeFileSync, rmSync } from "node:fs"; +import path from "node:path"; +import os from "node:os"; +import { readdirSync, readFileSync } from "node:fs"; +import type { SessionLog } from "../../.codex/skills/agentic-content-review/scripts/parse-review-and-score.js"; + +import { + resolveRuleName, + buildSessionLog, + writeSessionLog, +} from "../../.codex/skills/agentic-content-review/scripts/parse-review-and-score.js"; +import { execSync } from "node:child_process"; + +let tmpDir: string; + +beforeEach(() => { + tmpDir = path.join(os.tmpdir(), "vlint-test-" + Date.now()); + mkdirSync(tmpDir, { recursive: true }); +}); + +afterEach(() => { + rmSync(tmpDir, { recursive: true, force: true }); +}); + +describe("resolveRuleName", () => { + it("returns name from rule-index.yml when matched by path", () => { + const ruleDir = path.join(tmpDir, "rules"); + mkdirSync(ruleDir); + writeFileSync(path.join(ruleDir, "ai-patterns.md"), "# AI Patterns rule"); + writeFileSync( + path.join(ruleDir, "rule-index.yml"), + `pack: default\nactive: true\nrules:\n - id: ai-patterns\n name: AI Patterns\n path: ai-patterns.md\n active: true\n` + ); + const rulePath = path.join(ruleDir, "ai-patterns.md"); + expect(resolveRuleName(rulePath)).toBe("AI Patterns"); + }); + + it("falls back to basename when rule-index.yml is missing", () => { + const ruleDir = path.join(tmpDir, "rules"); + mkdirSync(ruleDir); + writeFileSync(path.join(ruleDir, "wordiness.md"), "# Wordiness rule"); + 
const rulePath = path.join(ruleDir, "wordiness.md"); + expect(resolveRuleName(rulePath)).toBe("wordiness"); + }); + + it("falls back to basename when rule not found in index", () => { + const ruleDir = path.join(tmpDir, "rules"); + mkdirSync(ruleDir); + writeFileSync( + path.join(ruleDir, "rule-index.yml"), + `pack: default\nactive: true\nrules:\n - id: other\n name: Other Rule\n path: other.md\n active: true\n` + ); + writeFileSync(path.join(ruleDir, "wordiness.md"), "# Wordiness rule"); + const rulePath = path.join(ruleDir, "wordiness.md"); + expect(resolveRuleName(rulePath)).toBe("wordiness"); + }); +}); + +describe("buildSessionLog", () => { + it("builds log with correct top-level fields", () => { + const finding = { + index: 0, + rulePath: path.join(tmpDir, "rules", "ai-patterns.md"), + sourceFile: path.join(tmpDir, "docs", "quickstart.md"), + line: 12, + evidenceQuote: "leveraging synergies", + ruleQuote: "Flag AI-like phrasing", + flagReasoning: "matches pattern", + issue: "AI phrasing detected", + plausibleNonViolation: "could be intentional", + contextSupportsViolation: true, + suggestion: "Rewrite directly", + confidence: 0.85, + }; + const timestamp = "2026-04-22T14:32:00Z"; + const log = buildSessionLog([finding], 423, 3, 8.5, timestamp); + expect(log.sessionId).toBe(timestamp); + expect(log.sourceFile).toBe(finding.sourceFile); + expect(log.wordCount).toBe(423); + expect(log.findingCount).toBe(3); + expect(log.score).toBe(8.5); + expect(log.findings).toHaveLength(1); + }); + + it("maps finding fields 1:1 with ruleName before rulePath", () => { + const ruleDir = path.join(tmpDir, "rules"); + mkdirSync(ruleDir, { recursive: true }); + writeFileSync( + path.join(ruleDir, "rule-index.yml"), + `pack: default\nactive: true\nrules:\n - id: ai-patterns\n name: AI Patterns\n path: ai-patterns.md\n active: true\n` + ); + writeFileSync(path.join(ruleDir, "ai-patterns.md"), "# rule"); + const finding = { + index: 0, + rulePath: path.join(ruleDir, 
"ai-patterns.md"), + sourceFile: "/docs/quickstart.md", + line: 5, + evidenceQuote: "quote", + ruleQuote: "rule quote", + flagReasoning: "reason", + issue: "issue text", + plausibleNonViolation: "benign", + contextSupportsViolation: false, + suggestion: "fix it", + confidence: 0.7, + }; + const log = buildSessionLog([finding], 100, 1, 9.0, "2026-04-22T14:32:00Z"); + const f = log.findings[0]!; + expect(Object.keys(f)[0]).toBe("ruleName"); + expect(Object.keys(f)[1]).toBe("rulePath"); + expect(f.ruleName).toBe("AI Patterns"); + expect(f.confidence).toBe(0.7); + }); + + it("derives sourceFile from first finding when findings exist", () => { + const finding = { + index: 0, + rulePath: "/rules/r.md", + sourceFile: "/docs/overview.md", + line: 1, + evidenceQuote: "e", + ruleQuote: "r", + flagReasoning: "f", + issue: "i", + plausibleNonViolation: "p", + contextSupportsViolation: true, + suggestion: "s", + confidence: 0.9, + }; + const log = buildSessionLog([finding], 50, 1, 9.5, "2026-04-22T14:32:00Z"); + expect(log.sourceFile).toBe("/docs/overview.md"); + }); + + it("returns empty sourceFile when findings array is empty", () => { + const log = buildSessionLog([], 0, 0, 10, "2026-04-22T14:32:00Z"); + expect(log.sourceFile).toBe(""); + expect(log.findings).toHaveLength(0); + }); +}); + +describe("writeSessionLog", () => { + it("creates sessions directory and writes the log file", () => { + const sessionsDir = path.join(tmpDir, ".vlint", "sessions"); + const log: SessionLog = { + sessionId: "2026-04-22T14:32:00Z", + sourceFile: "docs/quickstart.md", + wordCount: 100, + findingCount: 1, + score: 9.0, + findings: [], + }; + writeSessionLog(log, sessionsDir); + const files = readdirSync(sessionsDir); + expect(files).toHaveLength(1); + expect(files[0]).toMatch(/^2026-04-22T14-32-00Z-quickstart\.json$/); + }); + + it("file content is valid JSON matching the log shape", () => { + const sessionsDir = path.join(tmpDir, ".vlint", "sessions"); + const log: SessionLog = { + 
sessionId: "2026-04-22T14:32:00Z", + sourceFile: "/abs/path/to/overview.md", + wordCount: 200, + findingCount: 0, + score: 10.0, + findings: [], + }; + writeSessionLog(log, sessionsDir); + const files = readdirSync(sessionsDir); + const content = readFileSync(path.join(sessionsDir, files[0]!), "utf8"); + const parsed = JSON.parse(content); + expect(parsed.sessionId).toBe("2026-04-22T14:32:00Z"); + expect(parsed.sourceFile).toBe("/abs/path/to/overview.md"); + expect(parsed.score).toBe(10.0); + expect(Array.isArray(parsed.findings)).toBe(true); + }); + + it("replaces colons with hyphens in timestamp portion of filename", () => { + const sessionsDir = path.join(tmpDir, ".vlint", "sessions"); + const log: SessionLog = { + sessionId: "2026-04-22T09:05:30Z", + sourceFile: "docs/config.md", + wordCount: 50, + findingCount: 0, + score: 10.0, + findings: [], + }; + writeSessionLog(log, sessionsDir); + const files = readdirSync(sessionsDir); + expect(files[0]).toMatch(/^2026-04-22T09-05-30Z-config\.json$/); + }); +}); + +describe("--write-log flag integration", () => { + it("creates a session log file in .vlint/sessions when flag is passed", () => { + const ruleDir = path.join(tmpDir, "rules", "default"); + const sourceDir = path.join(tmpDir, "docs"); + mkdirSync(ruleDir, { recursive: true }); + mkdirSync(sourceDir, { recursive: true }); + + writeFileSync(path.join(sourceDir, "test-doc.md"), "This leverages synergies to unlock value."); + writeFileSync(path.join(ruleDir, "ai-patterns.md"), "Flag AI-generated phrasing like 'leveraging synergies'."); + writeFileSync( + path.join(ruleDir, "rule-index.yml"), + `pack: default\nactive: true\nrules:\n - id: ai-patterns\n name: AI Patterns\n path: ai-patterns.md\n active: true\n` + ); + + const reviewMd = [ + "### Finding", + `- Rule path: \`${path.join(ruleDir, "ai-patterns.md")}\``, + `- Source file: \`${path.join(sourceDir, "test-doc.md")}\``, + "- Line: `1`", + "- Evidence quote: `leverages synergies to unlock value`", + "- Rule 
quote: `Flag AI-generated phrasing like 'leveraging synergies'`", + "- Flag reasoning: `matches AI phrasing pattern`", + "- Issue: `AI-like phrasing detected`", + "- Plausible non-violation: `could be intentional industry language`", + "- Context supports violation: `true`", + "- Suggestion: `Rewrite to be more direct`", + "- Confidence: `0.85`", + ].join("\n"); + + const reviewPath = path.join(tmpDir, "review.md"); + writeFileSync(reviewPath, reviewMd); + + execSync( + `npx tsx ${path.resolve(".codex/skills/agentic-content-review/scripts/parse-review-and-score.ts")} ${reviewPath} --write-log`, + { cwd: tmpDir, env: { ...process.env } } + ); + + const sessionsDir = path.join(tmpDir, ".vlint", "sessions"); + const files = readdirSync(sessionsDir); + expect(files).toHaveLength(1); + expect(files[0]).toMatch(/test-doc\.json$/); + + const log = JSON.parse(readFileSync(path.join(sessionsDir, files[0]!), "utf8")); + expect(log.findings[0].ruleName).toBe("AI Patterns"); + expect(log.findings[0].confidence).toBe(0.85); + }); +}); From 19b721d652f0ecf64bfd29682bcd6685248c4bbb Mon Sep 17 00:00:00 2001 From: Osho Emmanuel Date: Wed, 22 Apr 2026 11:04:04 +0100 Subject: [PATCH 4/7] fix(skill): write session log on no-findings early return when --write-log is set --- .../scripts/parse-review-and-score.ts | 7 +++++++ .../parse-review-and-score.test.ts | 20 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts b/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts index 55137eb..7db92e7 100755 --- a/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts +++ b/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts @@ -510,6 +510,13 @@ function main(): void { const markdown = readFileSync(resolvedReviewPath, "utf8"); if (isNoFindings(markdown)) { + const writeLog = parseWriteLogFlag(process.argv.slice(2)); + if (writeLog) { + const sessionsDir = 
path.join(process.cwd(), ".vlint", "sessions"); + const timestamp = new Date().toISOString(); + const sessionLog = buildSessionLog([], 0, 0, 10, timestamp); + writeSessionLog(sessionLog, sessionsDir); + } const output: Output = { valid: true, findingCount: 0, wordCount: 0, score: 10, findings: [], warnings, errors }; console.log(JSON.stringify(output, null, 2)); return; diff --git a/tests/evaluations/parse-review-and-score.test.ts b/tests/evaluations/parse-review-and-score.test.ts index 842ad26..b85db75 100644 --- a/tests/evaluations/parse-review-and-score.test.ts +++ b/tests/evaluations/parse-review-and-score.test.ts @@ -238,4 +238,24 @@ describe("--write-log flag integration", () => { expect(log.findings[0].ruleName).toBe("AI Patterns"); expect(log.findings[0].confidence).toBe(0.85); }); + + it("writes an empty session log when review has no findings", () => { + const noFindingsMd = "## Findings\nNo findings."; + const reviewPath = path.join(tmpDir, "empty-review.md"); + writeFileSync(reviewPath, noFindingsMd); + + execSync( + `npx tsx ${path.resolve(".codex/skills/agentic-content-review/scripts/parse-review-and-score.ts")} ${reviewPath} --write-log`, + { cwd: tmpDir, env: { ...process.env } } + ); + + const sessionsDir = path.join(tmpDir, ".vlint", "sessions"); + const files = readdirSync(sessionsDir); + expect(files).toHaveLength(1); + + const log = JSON.parse(readFileSync(path.join(sessionsDir, files[0]!), "utf8")); + expect(log.findingCount).toBe(0); + expect(log.score).toBe(10); + expect(log.findings).toHaveLength(0); + }); }); From 91719d74902eb70dce3064c555375672a053ac86 Mon Sep 17 00:00:00 2001 From: Osho Emmanuel Date: Wed, 22 Apr 2026 11:10:39 +0100 Subject: [PATCH 5/7] fix(skill): fix resolveRuleName path-before-name bug and deduplicate write-log flag parsing --- .../scripts/parse-review-and-score.ts | 35 ++++++++++--------- .../parse-review-and-score.test.ts | 12 +++++++ 2 files changed, 31 insertions(+), 16 deletions(-) diff --git 
a/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts b/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts index 7db92e7..50fe29c 100755 --- a/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts +++ b/.codex/skills/agentic-content-review/scripts/parse-review-and-score.ts @@ -129,27 +129,31 @@ export function resolveRuleName(rulePath: string): string { } const ruleBaseName = path.basename(rulePath); - const lines = content.split(/\r?\n/); - let currentRulePath: string | undefined; - let currentRuleName: string | undefined; + const entries: Array<{ name?: string; path?: string }> = []; + let current: { name?: string; path?: string } | undefined; - for (const rawLine of lines) { + for (const rawLine of content.split(/\r?\n/)) { const withoutComment = rawLine.replace(/\s+#.*$/, "").trim(); if (withoutComment.startsWith("- ")) { - currentRulePath = undefined; - currentRuleName = undefined; + current = {}; + entries.push(current); + const rest = withoutComment.slice(2).trim(); + const nameMatch = rest.match(/^name:\s*(.+)$/); + if (nameMatch) current.name = nameMatch[1]!.replace(/^['"]|['"]$/g, "").trim(); + const pathMatch = rest.match(/^path:\s*(.+)$/); + if (pathMatch) current.path = pathMatch[1]!.replace(/^['"]|['"]$/g, "").trim(); + continue; } - const nameMatch = withoutComment.match(/^name:\s*(.+)$/); - if (nameMatch) currentRuleName = nameMatch[1]!.replace(/^['"]|['"]$/g, "").trim(); - const pathMatch = withoutComment.match(/^path:\s*(.+)$/); - if (pathMatch) currentRulePath = pathMatch[1]!.replace(/^['"]|['"]$/g, "").trim(); - - if (currentRulePath === ruleBaseName && currentRuleName) { - return currentRuleName; + if (current) { + const nameMatch = withoutComment.match(/^name:\s*(.+)$/); + if (nameMatch) current.name = nameMatch[1]!.replace(/^['"]|['"]$/g, "").trim(); + const pathMatch = withoutComment.match(/^path:\s*(.+)$/); + if (pathMatch) current.path = pathMatch[1]!.replace(/^['"]|['"]$/g, 
"").trim(); } } - return baseName; + const match = entries.find((e) => e.path === ruleBaseName); + return match?.name ?? baseName; } export function buildSessionLog( @@ -486,6 +490,7 @@ function main(): void { const reviewPath = process.argv[2]; const errors: string[] = []; const warnings: Warning[] = []; + const writeLog = parseWriteLogFlag(process.argv.slice(2)); if (!reviewPath) { errors.push("Usage: parse-review-and-score.ts "); @@ -510,7 +515,6 @@ function main(): void { const markdown = readFileSync(resolvedReviewPath, "utf8"); if (isNoFindings(markdown)) { - const writeLog = parseWriteLogFlag(process.argv.slice(2)); if (writeLog) { const sessionsDir = path.join(process.cwd(), ".vlint", "sessions"); const timestamp = new Date().toISOString(); @@ -541,7 +545,6 @@ function main(): void { errors, }; - const writeLog = parseWriteLogFlag(process.argv.slice(2)); if (writeLog) { const sessionsDir = path.join(process.cwd(), ".vlint", "sessions"); const timestamp = new Date().toISOString(); diff --git a/tests/evaluations/parse-review-and-score.test.ts b/tests/evaluations/parse-review-and-score.test.ts index b85db75..b7694fb 100644 --- a/tests/evaluations/parse-review-and-score.test.ts +++ b/tests/evaluations/parse-review-and-score.test.ts @@ -55,6 +55,18 @@ describe("resolveRuleName", () => { const rulePath = path.join(ruleDir, "wordiness.md"); expect(resolveRuleName(rulePath)).toBe("wordiness"); }); + + it("returns name when path appears before name in index entry", () => { + const ruleDir = path.join(tmpDir, "rules"); + mkdirSync(ruleDir); + writeFileSync(path.join(ruleDir, "passive-voice.md"), "# Passive Voice rule"); + writeFileSync( + path.join(ruleDir, "rule-index.yml"), + `pack: default\nactive: true\nrules:\n - id: passive-voice\n path: passive-voice.md\n name: Passive Voice\n active: true\n` + ); + const rulePath = path.join(ruleDir, "passive-voice.md"); + expect(resolveRuleName(rulePath)).toBe("Passive Voice"); + }); }); describe("buildSessionLog", () => { 
From 8ee0152489b51cd4a1a92fe4d0b4d9854bd3db6c Mon Sep 17 00:00:00 2001 From: Osho Emmanuel Date: Wed, 22 Apr 2026 11:12:59 +0100 Subject: [PATCH 6/7] feat(skill): append --write-log to parser command in SKILL.md --- .codex/skills/agentic-content-review/SKILL.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.codex/skills/agentic-content-review/SKILL.md b/.codex/skills/agentic-content-review/SKILL.md index 6f2f12f..fea40fa 100644 --- a/.codex/skills/agentic-content-review/SKILL.md +++ b/.codex/skills/agentic-content-review/SKILL.md @@ -21,6 +21,14 @@ Use this skill to review prose and other content files against active markdown r - `same-anchor groups` means findings that share source file, rule path, Evidence quote, Rule quote, and Line. Treat them as semantic-review warnings only, never as duplicate decisions. - Density scoring is strict by default, based on finding count divided by word count, clamped to a `0-10` range. Recompute the final score after main-agent edits. +## Reviewer Sub-Agent Setup + +If your toolset supports sub-agent definitions, check whether a sub-agent definition named `content-reviewer` exists in your workspace's sub-agent directory. If it does not exist, announce that you are creating it and create it using the content of `references/reviewer-directive.md` as its directive. Do not spin up a sub-agent at this step — only create the definition. + +If your toolset does not support sub-agent definitions, announce it and proceed using your toolset's inline sub-agent capability with `references/reviewer-directive.md` as the prompt. + +--- + ## Workflow 1. Resolve active rules from workspace `.vlint/rules/*/rule-index.yml` files, or from bundled defaults at `.codex/skills/agentic-content-review/rules/default/rule-index.yml` when no workspace indexes exist or the caller explicitly asks for defaults. 
@@ -58,5 +66,5 @@ npx tsx .codex/skills/agentic-content-review/scripts/list-active-rules.ts +npx tsx .codex/skills/agentic-content-review/scripts/parse-review-and-score.ts --write-log ``` From 85b78c47f127f9b76b34954984822257ae0f10f9 Mon Sep 17 00:00:00 2001 From: Osho Emmanuel Date: Thu, 23 Apr 2026 15:26:52 +0100 Subject: [PATCH 7/7] change docs.json to mint.json for multirepo setup --- docs/{docs.json => mint.json} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/{docs.json => mint.json} (100%) diff --git a/docs/docs.json b/docs/mint.json similarity index 100% rename from docs/docs.json rename to docs/mint.json