From b053027f30742aa3ef39c297b33750318a883f34 Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 10:26:41 -0500 Subject: [PATCH 01/18] feat: add python gr2 sync status design slice --- gr2/docs/ASSESS-SYNC-ADVERSARIAL-SPECS.md | 178 +++++++++++ gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md | 233 ++++++++++++++ gr2/docs/SYNC-FAILURE-CONTRACT.md | 159 ++++++++++ gr2/python_cli/app.py | 17 ++ gr2/python_cli/platform.py | 234 ++++++++++++++ gr2/python_cli/syncops.py | 356 ++++++++++++++++++++++ 6 files changed, 1177 insertions(+) create mode 100644 gr2/docs/ASSESS-SYNC-ADVERSARIAL-SPECS.md create mode 100644 gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md create mode 100644 gr2/docs/SYNC-FAILURE-CONTRACT.md create mode 100644 gr2/python_cli/platform.py create mode 100644 gr2/python_cli/syncops.py diff --git a/gr2/docs/ASSESS-SYNC-ADVERSARIAL-SPECS.md b/gr2/docs/ASSESS-SYNC-ADVERSARIAL-SPECS.md new file mode 100644 index 0000000..e6d8f71 --- /dev/null +++ b/gr2/docs/ASSESS-SYNC-ADVERSARIAL-SPECS.md @@ -0,0 +1,178 @@ +# Assess Sync Adversarial Specs + +Artifact 2 for the Sprint 20 sync lane. + +This document lists the failure-first specs the Python `gr2 sync` implementation +must satisfy before `sync run` is allowed to mutate workspace state. + +## 1. Missing Spec + +Preconditions: +- workspace has no `.grip/workspace_spec.toml` + +Trigger: +- `gr2 sync status ` + +Expected: +- command fails immediately +- error points to `gr2 workspace init` +- no cache, repo, lane, or event state is written + +## 2. 
Partial Clone Failure + +Preconditions: +- spec declares 3 repos +- repo A and B are reachable +- repo C remote is invalid or unavailable + +Trigger: +- `gr2 sync run` + +Expected: +- planner marks A/B runnable and C failing before execution starts +- execution stops on C if C is in the same phase batch +- result reports: + - A/B success or skipped state explicitly + - C as failure with repo-scoped error payload +- no successful repo update is silently rolled back +- event outbox records partial progress and terminal failure + +Invariant: +- sync never reports all-green on partial workspace failure + +## 3. Dirty Shared Repo + +Preconditions: +- shared repo checkout exists +- uncommitted changes in repo root + +Trigger: +- `gr2 sync status` + +Expected: +- issue `dirty_shared_repo` +- issue blocks sync +- planner does not schedule branch movement or fetch-dependent mutation through + the dirty checkout + +Invariant: +- dirty state wins over convenience + +## 4. Dirty Lane Checkout During Sync + +Preconditions: +- lane checkout exists +- lane repo has uncommitted changes + +Trigger: +- `gr2 sync status` + +Expected: +- issue `dirty_lane_repo` +- issue blocks sync +- planner may still inspect other repos, but lane mutation is blocked + +Invariant: +- lane-local work is never overwritten by workspace sync + +## 5. Conflicting Branch States Across Repos + +Preconditions: +- lane spans repos `app`, `api`, `premium` +- expected branch is `feat/auth` +- `app` is on `feat/auth` +- `api` is behind remote +- `premium` is on a different local branch + +Trigger: +- `gr2 sync status` + +Expected: +- planner reports repo-scoped branch inspection operations +- branch divergence appears as explicit sync issue, not implicit correction +- no automatic branch checkout/rebase in status mode + +Invariant: +- branch alignment must be explicit before mutation + +## 6. 
Shared Cache Path Conflict + +Preconditions: +- `.grip/cache/repos/.git` exists +- path is not a bare git directory + +Trigger: +- `gr2 sync status` + +Expected: +- issue `cache_path_conflict` +- sync blocks +- planner does not attempt to reuse or overwrite the invalid cache path + +## 7. Invalid Repo Hook Config + +Preconditions: +- shared repo has `.gr2/hooks.toml` +- file does not parse or violates schema + +Trigger: +- `gr2 sync status` + +Expected: +- spec validation fails before sync planning proceeds +- sync status returns blocked with the hook validation error included + +Invariant: +- repo hook errors fail fast at plan time + +## 8. Sync During Active Edit Lease + +Preconditions: +- lane has an active `edit` lease +- lane repo is otherwise clean + +Trigger: +- `gr2 sync run --lane ` + +Expected: +- sync refuses lane mutation for the leased lane +- non-lane workspace inspection may still succeed +- result clearly distinguishes lease-blocked lanes from unrelated workspace + status + +Invariant: +- sync does not tunnel through active edit occupancy + +## 9. Concurrent Sync From Two Worktrees + +Preconditions: +- same workspace available from two operator shells +- both invoke sync against overlapping repos + +Trigger: +- `gr2 sync run` concurrently + +Expected: +- shared mutable resources use explicit lock discipline +- losing side returns machine-readable contention error +- no cache corruption, no partially-written apply metadata + +Invariant: +- concurrency failure is reported, not hidden as random repo damage + +## 10. 
Platform Backend Failure + +Preconditions: +- `PlatformAdapter` backend is GitHub via `gh` +- `gh` auth is invalid or the command times out + +Trigger: +- sync planner tries to refresh PR/check state + +Expected: +- repo/local sync inspection still reports local status +- platform-dependent operations are marked degraded or failed +- failure is explicit in the result payload + +Invariant: +- adapter failure must not masquerade as clean workspace state diff --git a/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md b/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md new file mode 100644 index 0000000..82af582 --- /dev/null +++ b/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md @@ -0,0 +1,233 @@ +# Platform Adapter And Sync + +Sprint 20 design lane for: + +- `PlatformAdapter` protocol +- GitHub-only shipping backend for `gr2 2.0` +- sync algorithm for cross-repo orchestration + +Required companion artifacts for this design: + +- adversarial failing specs: + [ASSESS-SYNC-ADVERSARIAL-SPECS.md](./ASSESS-SYNC-ADVERSARIAL-SPECS.md) +- failure/rollback contract: + [SYNC-FAILURE-CONTRACT.md](./SYNC-FAILURE-CONTRACT.md) + +## 1. Scope + +`gr2` owns cross-repo orchestration in OSS: + +- workspace spec +- materialization +- sync +- lanes +- aggregated status +- PR orchestration + +Single-repo git remains raw git. + +Platform integration is intentionally narrow: + +- ship GitHub only first +- hide platform details behind a protocol +- let future GitLab / Azure / Bitbucket adapters arrive later without changing `gr2` UX + +## 2. Adapter Contract + +`gr2/python_cli/platform.py` defines the protocol: + +- `create_pr` +- `merge_pr` +- `pr_status` +- `list_prs` +- `pr_checks` + +The CLI consumes the protocol only. It does not talk to GitHub directly. + +### Shipping backend + +The first backend is `GitHubAdapter`, implemented on top of `gh` CLI. 
+ +Reasoning: + +- simplest path to production +- no custom API client to maintain +- reuses existing authenticated operator environment +- keeps platform logic thin while we prove the orchestration UX + +### Future plugin path + +The adapter boundary is intentionally protocol-shaped, not GitHub-shaped. + +That makes third-party adapters possible later: + +- config-based adapter selection +- module import / entry-point registration +- same `gr2` PR commands, different backend implementation + +## 3. Required Spawn-Readiness Seams + +For premium spawn to move on top of `gr2`, these are required: + +- hook invocation API with stable structured results +- workspace / lane event outbox +- leases and lane metadata +- `exec status` and `exec run` +- machine-readable failure surfaces + +These are not optional polish. They are spawn prerequisites. + +## 4. Sync Goals + +`sync` is the missing orchestration surface between: + +- spec/plan/apply +- lane state +- repo caches +- review/PR flow + +`sync` must be: + +- safe with dirty state +- lane-aware +- explicit about what it mutates +- resumable after partial failure + +## 5. Sync Phases + +### Phase A: Inspect + +Read: + +- workspace spec +- shared repo cache state +- shared repo checkout state +- lane metadata +- lease state +- hook configs + +Emit a workspace-level snapshot: + +- missing repos +- stale caches +- dirty repos +- lane checkouts missing +- lane branches behind remote +- hook config errors + +### Phase B: Plan + +Build a sync plan with explicit operations: + +- refresh repo cache +- fast-forward shared repo +- materialize missing repo +- refresh lane branch +- block on dirty state +- block on conflicting lease +- surface manual action required + +No mutation yet. 
+ +### Phase C: Execute + +Apply only safe operations by default: + +- fetch/update cache +- clone missing repo +- materialize missing lane checkout +- fast-forward clean branches + +Unsafe operations must block unless explicitly requested: + +- dirty shared repo +- dirty lane checkout +- branch divergence requiring merge/rebase +- hook failure with `on_failure = block` + +### Phase D: Emit + +Write: + +- structured sync result +- event outbox entries +- updated aggregated status snapshot + +This is the seam premium and QA will consume. + +## 6. Sync Safety Rules + +1. Dirty state wins over convenience. + If a repo is dirty, `sync` blocks instead of mutating through it. + +2. Lanes are first-class. + `sync` must treat shared repos and lane checkouts differently. + +3. Shared repo cache is substrate, not UX. + Mutations there should be invisible unless they affect user work. + +4. Partial failure must be reportable. + Example: 3 of 5 repos updated, 1 blocked dirty, 1 platform failure. + +5. Event emission is part of correctness. + `sync` must emit enough machine-readable state for premium spawn and QA. + +## 7. Proposed Command Shapes + +Initial surfaces: + +- `gr2 sync status` +- `gr2 sync run` + +Possible later flags: + +- `--lane ` +- `--owner-unit ` +- `--refresh-prs` +- `--allow-dirty-stash` +- `--json` + +`sync status` should be the dry-run/default read path. + +`sync run` should consume the same planner output and execute allowed operations. + +## 8. Failure Scenarios The QA Arena Must Cover + +- dirty shared repo during sync +- dirty lane checkout during sync +- lane branch behind remote +- lane branch diverged from remote +- `gh` timeout during PR create/status +- partial repo refresh failure +- hook failure during sync-triggered materialization +- concurrent sync from two worktrees +- sync during active edit lease + +These are required Sprint 20 QA inputs, not later hardening. + +## 9. 
Implementation Ordering + +I agree with Layne's platform-first ordering, with one constraint: + +1. `PlatformAdapter` protocol + `GitHubAdapter` +2. sync algorithm design with event outbox requirements folded in +3. aggregated status +4. PR create/status/merge on the adapter +5. lane switch/list polish + +Rationale: + +- PR lifecycle should not be implemented before the adapter boundary exists +- sync and aggregated status share most of the same inspection model +- event outbox requirements need to be considered while designing sync, not bolted on later + +## 10. Non-Goals + +Not part of Sprint 20 `gr2` OSS: + +- single-repo git porcelain +- spawn/agent orchestration +- release flow +- multi-platform support beyond GitHub + +Those would either duplicate raw git or blur the OSS/premium boundary. diff --git a/gr2/docs/SYNC-FAILURE-CONTRACT.md b/gr2/docs/SYNC-FAILURE-CONTRACT.md new file mode 100644 index 0000000..0a31180 --- /dev/null +++ b/gr2/docs/SYNC-FAILURE-CONTRACT.md @@ -0,0 +1,159 @@ +# Sync Failure Contract + +Artifact 3 for the Sprint 20 sync lane. + +This contract defines what Python `gr2 sync` is allowed to do on failure, what +it must report, and what it must never attempt to hide. + +## 1. Core Rule + +`sync status` is read-only. + +`sync run` may mutate workspace state, but it must never pretend a partial +failure is a rollback-complete success. + +## 2. Mutation Model + +`sync` operates in ordered phases: + +1. inspect +2. plan +3. execute +4. emit result + outbox events + +Within a phase, successful mutations are durable unless the operation itself has +an explicit local rollback mechanism. + +Examples: +- a completed `git fetch` is durable +- a completed cache refresh is durable +- a completed clone is durable +- a completed branch checkout is durable + +These are not automatically rolled back just because a later repo fails. + +## 3. 
Default Failure Behavior + +On the first blocking failure in `sync run`: + +- stop scheduling new mutating operations in the current batch +- preserve already-completed successful operations +- report all completed work explicitly +- report the blocking failure explicitly +- write an event/outbox record describing the partial state + +The contract is: +- stop +- preserve +- report + +Not: +- guess +- continue blindly +- fabricate rollback + +## 4. Dirty State + +Dirty state is a pre-execution blocker by default. + +If a shared repo or lane checkout is dirty: +- `sync status` returns `blocked` +- `sync run` must not mutate through that checkout unless the command explicitly + supports a later opt-in dirty-state strategy + +Initial Sprint 20 contract: +- no implicit stash +- no implicit commit +- no implicit reset + +## 5. Partial State Contract + +If `sync run` partially succeeds: + +- result status is `partial_failure` +- result contains: + - completed operations + - blocked operations + - failed operations + - unaffected operations, if known +- event outbox must include: + - `sync_started` + - one event per completed mutation + - `sync_failed` + +Consumers must be able to reconstruct: +- what changed +- what did not change +- what needs human or agent follow-up + +## 6. 
Rollback Rules + +Default rule: +- no automatic workspace-wide rollback + +Reason: +- cross-repo rollback is not reliably safe +- later repos may fail after earlier repos perform valid, independent updates +- forcing rollback would risk clobbering legitimate state + +Allowed rollback only when all of the following are true: +- rollback scope is local to one operation +- rollback is deterministic +- rollback result can be verified immediately +- rollback failure is itself reportable + +Examples of acceptable local rollback candidates later: +- removing a just-created empty metadata file +- deleting a just-created lane marker that has no downstream references yet + +Examples not allowed by default: +- resetting git refs across multiple repos +- auto-restoring stashes across partially-mutated lane trees +- deleting refreshed caches because a later repo failed + +## 7. Error Reporting Contract + +Every blocking failure must carry: +- `code` +- `scope` +- `subject` +- human-readable `message` +- machine-readable `details` when available + +Every sync result must distinguish: +- `blocked` from policy/safety preconditions +- `failed` from runtime execution errors +- `partial_failure` from all-or-nothing failure + +## 8. Lease and Occupancy Contract + +If sync encounters an active conflicting lease: +- it is a blocker, not a warning +- sync does not override or steal the lease +- result points to the owning actor and lease mode when available + +If a stale lease policy is added later, it must be explicit and separately +authorized. It is not part of the default sync contract. + +## 9. Platform Adapter Failure Contract + +If the `PlatformAdapter` backend fails: +- local repo and lane inspection still completes when possible +- platform-derived fields are marked degraded/failed +- sync status must not silently omit missing platform data + +GitHub via `gh` is treated as an external dependency: +- failures are surfaced +- not normalized away + +## 10. 
Operator Expectations + +When `sync` fails, the operator should be able to answer: + +1. what changed? +2. what did not change? +3. what blocked the next step? +4. what is safe to retry? + +If the result payload cannot answer those four questions, the sync surface is +not ready for production mutation. diff --git a/gr2/python_cli/app.py b/gr2/python_cli/app.py index 2876343..ae333f3 100644 --- a/gr2/python_cli/app.py +++ b/gr2/python_cli/app.py @@ -11,6 +11,7 @@ from . import execops from . import migration +from . import syncops from .gitops import ( branch_exists, checkout_branch, @@ -38,6 +39,7 @@ workspace_app = typer.Typer(help="Workspace bootstrap and materialization") spec_app = typer.Typer(help="Declarative workspace spec operations") exec_app = typer.Typer(help="Lane-aware execution planning and execution") +sync_app = typer.Typer(help="Workspace-wide sync inspection and execution") app.add_typer(repo_app, name="repo") app.add_typer(lane_app, name="lane") @@ -46,6 +48,7 @@ app.add_typer(workspace_app, name="workspace") app.add_typer(spec_app, name="spec") app.add_typer(exec_app, name="exec") +app.add_typer(sync_app, name="sync") def _workspace_repo_spec(workspace_root: Path, repo_name: str) -> dict[str, object]: @@ -249,6 +252,20 @@ def _exit(code: int) -> None: raise typer.Exit(code=code) +@sync_app.command("status") +def sync_status( + workspace_root: Path, + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Inspect workspace-wide sync readiness without mutating any repo state.""" + workspace_root = workspace_root.resolve() + plan = syncops.build_sync_plan(workspace_root) + if json_output: + typer.echo(syncops.sync_status_json(workspace_root)) + return + typer.echo(syncops.render_sync_plan(plan)) + + @workspace_app.command("init") def workspace_init( workspace_root: Path, diff --git a/gr2/python_cli/platform.py b/gr2/python_cli/platform.py new file mode 100644 index 0000000..ee18a7f --- /dev/null +++ 
b/gr2/python_cli/platform.py @@ -0,0 +1,234 @@ +from __future__ import annotations + +import json +import shutil +import subprocess +from dataclasses import asdict, dataclass, field +from pathlib import Path +from typing import Protocol + + +@dataclass(frozen=True) +class PRRef: + repo: str + number: int | None = None + url: str | None = None + head_branch: str | None = None + base_branch: str | None = None + title: str | None = None + + def as_dict(self) -> dict[str, object]: + return asdict(self) + + +@dataclass(frozen=True) +class PRCheck: + name: str + status: str + conclusion: str | None = None + details_url: str | None = None + + def as_dict(self) -> dict[str, object]: + return asdict(self) + + +@dataclass(frozen=True) +class PRStatus: + ref: PRRef + state: str + mergeable: str | None = None + checks: list[PRCheck] = field(default_factory=list) + + def as_dict(self) -> dict[str, object]: + return { + "ref": self.ref.as_dict(), + "state": self.state, + "mergeable": self.mergeable, + "checks": [item.as_dict() for item in self.checks], + } + + +@dataclass(frozen=True) +class CreatePRRequest: + repo: str + title: str + body: str + head_branch: str + base_branch: str + draft: bool = False + + +class PlatformAdapter(Protocol): + """Protocol for platform-backed PR orchestration. + + gr2 owns the orchestration UX. Adapters hide the hosting platform backend. + """ + + name: str + + def create_pr(self, request: CreatePRRequest) -> PRRef: ... + + def merge_pr(self, repo: str, number: int) -> PRRef: ... + + def pr_status(self, repo: str, number: int) -> PRStatus: ... + + def list_prs(self, repo: str, *, head_branch: str | None = None) -> list[PRRef]: ... + + def pr_checks(self, repo: str, number: int) -> list[PRCheck]: ... 
+ + +class AdapterError(RuntimeError): + pass + + +def _run_json(command: list[str], *, cwd: Path | None = None) -> object: + proc = subprocess.run( + command, + cwd=cwd, + capture_output=True, + text=True, + check=False, + ) + if proc.returncode != 0: + raise AdapterError(proc.stderr.strip() or proc.stdout.strip() or f"command failed: {' '.join(command)}") + try: + return json.loads(proc.stdout) + except json.JSONDecodeError as exc: + raise AdapterError(f"command did not return valid json: {' '.join(command)}") from exc + + +class GitHubAdapter: + name = "github" + + def __init__(self, gh_binary: str = "gh") -> None: + if shutil.which(gh_binary) is None: + raise AdapterError(f"`{gh_binary}` not found in PATH") + self.gh_binary = gh_binary + + def create_pr(self, request: CreatePRRequest) -> PRRef: + cmd = [ + self.gh_binary, + "pr", + "create", + "--repo", + request.repo, + "--title", + request.title, + "--body", + request.body, + "--head", + request.head_branch, + "--base", + request.base_branch, + ] + if request.draft: + cmd.append("--draft") + proc = subprocess.run(cmd, capture_output=True, text=True, check=False) + if proc.returncode != 0: + raise AdapterError(proc.stderr.strip() or proc.stdout.strip() or "gh pr create failed") + url = proc.stdout.strip() + return PRRef( + repo=request.repo, + url=url or None, + head_branch=request.head_branch, + base_branch=request.base_branch, + title=request.title, + ) + + def merge_pr(self, repo: str, number: int) -> PRRef: + proc = subprocess.run( + [self.gh_binary, "pr", "merge", str(number), "--repo", repo], + capture_output=True, + text=True, + check=False, + ) + if proc.returncode != 0: + raise AdapterError(proc.stderr.strip() or proc.stdout.strip() or "gh pr merge failed") + return PRRef(repo=repo, number=number) + + def pr_status(self, repo: str, number: int) -> PRStatus: + payload = _run_json( + [ + self.gh_binary, + "pr", + "view", + str(number), + "--repo", + repo, + "--json", + 
"number,url,headRefName,baseRefName,title,state,mergeable,statusCheckRollup", + ] + ) + assert isinstance(payload, dict) + checks = self._parse_checks(payload.get("statusCheckRollup") or []) + ref = PRRef( + repo=repo, + number=payload.get("number"), + url=payload.get("url"), + head_branch=payload.get("headRefName"), + base_branch=payload.get("baseRefName"), + title=payload.get("title"), + ) + return PRStatus( + ref=ref, + state=str(payload.get("state", "UNKNOWN")), + mergeable=str(payload.get("mergeable")) if payload.get("mergeable") is not None else None, + checks=checks, + ) + + def list_prs(self, repo: str, *, head_branch: str | None = None) -> list[PRRef]: + payload = _run_json( + [ + self.gh_binary, + "pr", + "list", + "--repo", + repo, + "--json", + "number,url,headRefName,baseRefName,title", + ] + ) + assert isinstance(payload, list) + refs: list[PRRef] = [] + for item in payload: + if not isinstance(item, dict): + continue + if head_branch and item.get("headRefName") != head_branch: + continue + refs.append( + PRRef( + repo=repo, + number=item.get("number"), + url=item.get("url"), + head_branch=item.get("headRefName"), + base_branch=item.get("baseRefName"), + title=item.get("title"), + ) + ) + return refs + + def pr_checks(self, repo: str, number: int) -> list[PRCheck]: + return self.pr_status(repo, number).checks + + @staticmethod + def _parse_checks(rows: list[object]) -> list[PRCheck]: + checks: list[PRCheck] = [] + for row in rows: + if not isinstance(row, dict): + continue + checks.append( + PRCheck( + name=str(row.get("name", "unknown")), + status=str(row.get("status", "UNKNOWN")), + conclusion=(str(row["conclusion"]) if row.get("conclusion") is not None else None), + details_url=row.get("detailsUrl"), + ) + ) + return checks + + +def get_platform_adapter(name: str) -> PlatformAdapter: + normalized = name.strip().lower() + if normalized in {"github", "gh"}: + return GitHubAdapter() + raise AdapterError(f"unknown platform adapter: {name}") diff --git 
a/gr2/python_cli/syncops.py b/gr2/python_cli/syncops.py new file mode 100644 index 0000000..6beb364 --- /dev/null +++ b/gr2/python_cli/syncops.py @@ -0,0 +1,356 @@ +from __future__ import annotations + +import dataclasses +import json +from pathlib import Path + +from gr2.prototypes import lane_workspace_prototype as lane_proto + +from .gitops import is_git_dir, is_git_repo, repo_dirty +from .hooks import load_repo_hooks +from .spec_apply import ( + ValidationIssue, + load_workspace_spec_doc, + repo_cache_path, + validate_spec, + workspace_spec_path, +) + + +@dataclasses.dataclass(frozen=True) +class SyncIssue: + level: str + code: str + scope: str + subject: str + message: str + blocks: bool + path: str | None = None + details: dict[str, object] = dataclasses.field(default_factory=dict) + + def as_dict(self) -> dict[str, object]: + return dataclasses.asdict(self) + + +@dataclasses.dataclass(frozen=True) +class SyncOperation: + kind: str + scope: str + subject: str + target_path: str + reason: str + details: dict[str, object] = dataclasses.field(default_factory=dict) + + def as_dict(self) -> dict[str, object]: + return dataclasses.asdict(self) + + +@dataclasses.dataclass(frozen=True) +class SyncPlan: + workspace_root: str + spec_path: str + status: str + issues: list[SyncIssue] + operations: list[SyncOperation] + + def as_dict(self) -> dict[str, object]: + return { + "workspace_root": self.workspace_root, + "spec_path": self.spec_path, + "status": self.status, + "issue_count": len(self.issues), + "operation_count": len(self.operations), + "issues": [item.as_dict() for item in self.issues], + "operations": [item.as_dict() for item in self.operations], + } + + +@dataclasses.dataclass(frozen=True) +class SyncResult: + workspace_root: str + status: str + plan_status: str + applied: list[str] + blocked: list[SyncIssue] + failures: list[SyncIssue] + rollback_contract: str + + def as_dict(self) -> dict[str, object]: + return { + "workspace_root": self.workspace_root, + 
"status": self.status, + "plan_status": self.plan_status, + "applied": list(self.applied), + "blocked": [item.as_dict() for item in self.blocked], + "failures": [item.as_dict() for item in self.failures], + "rollback_contract": self.rollback_contract, + } + + +def _spec_issue_to_sync(issue: ValidationIssue) -> SyncIssue: + return SyncIssue( + level=issue.level, + code=issue.code, + scope="workspace_spec", + subject=issue.path or "workspace_spec", + message=issue.message, + blocks=issue.level == "error", + path=issue.path, + ) + + +def _iter_lane_docs(workspace_root: Path) -> list[tuple[str, str, dict[str, object]]]: + lanes_root = workspace_root / "agents" + docs: list[tuple[str, str, dict[str, object]]] = [] + if not lanes_root.exists(): + return docs + for owner_dir in sorted(lanes_root.iterdir()): + lane_parent = owner_dir / "lanes" + if not lane_parent.is_dir(): + continue + for lane_dir in sorted(lane_parent.iterdir()): + lane_toml = lane_dir / "lane.toml" + if not lane_toml.exists(): + continue + try: + doc = lane_proto.load_lane_doc(workspace_root, owner_dir.name, lane_dir.name) + except Exception as exc: # pragma: no cover - defensive against prototype parser issues + docs.append( + ( + owner_dir.name, + lane_dir.name, + { + "lane_name": lane_dir.name, + "owner_unit": owner_dir.name, + "_load_error": str(exc), + }, + ) + ) + continue + docs.append((owner_dir.name, lane_dir.name, doc)) + return docs + + +def _status_from_issues(issues: list[SyncIssue]) -> str: + if any(item.blocks for item in issues): + return "blocked" + if issues: + return "attention" + return "ready" + + +def build_sync_plan(workspace_root: Path) -> SyncPlan: + workspace_root = workspace_root.resolve() + spec_path = workspace_spec_path(workspace_root) + if not spec_path.exists(): + raise SystemExit( + f"workspace spec not found: {spec_path}\n" + "run `gr2 workspace init ` first or create .grip/workspace_spec.toml explicitly" + ) + + issues: list[SyncIssue] = [] + operations: 
list[SyncOperation] = [] + + issues.extend(_spec_issue_to_sync(issue) for issue in validate_spec(workspace_root)) + if any(item.blocks for item in issues): + return SyncPlan( + workspace_root=str(workspace_root), + spec_path=str(spec_path), + status=_status_from_issues(issues), + issues=issues, + operations=operations, + ) + + spec = load_workspace_spec_doc(workspace_root) + for repo in spec.get("repos", []): + repo_name = str(repo["name"]) + repo_root = workspace_root / str(repo["path"]) + cache_root = repo_cache_path(workspace_root, repo_name) + + if not cache_root.exists(): + operations.append( + SyncOperation( + kind="seed_repo_cache", + scope="repo_cache", + subject=repo_name, + target_path=str(cache_root), + reason="shared repo cache missing", + details={"url": str(repo["url"])}, + ) + ) + elif not is_git_dir(cache_root): + issues.append( + SyncIssue( + level="error", + code="cache_path_conflict", + scope="repo_cache", + subject=repo_name, + message=f"repo cache path exists but is not a bare git dir: {cache_root}", + blocks=True, + path=str(cache_root), + ) + ) + else: + operations.append( + SyncOperation( + kind="refresh_repo_cache", + scope="repo_cache", + subject=repo_name, + target_path=str(cache_root), + reason="shared repo cache present; refresh remote state", + details={"url": str(repo["url"])}, + ) + ) + + if not repo_root.exists(): + operations.append( + SyncOperation( + kind="clone_shared_repo", + scope="shared_repo", + subject=repo_name, + target_path=str(repo_root), + reason="shared repo checkout missing", + details={"url": str(repo["url"])}, + ) + ) + elif not is_git_repo(repo_root): + issues.append( + SyncIssue( + level="error", + code="shared_repo_path_conflict", + scope="shared_repo", + subject=repo_name, + message=f"shared repo path exists but is not a git repo: {repo_root}", + blocks=True, + path=str(repo_root), + ) + ) + else: + if repo_dirty(repo_root): + issues.append( + SyncIssue( + level="error", + code="dirty_shared_repo", + 
scope="shared_repo", + subject=repo_name, + message=f"shared repo has uncommitted changes and blocks sync: {repo_root}", + blocks=True, + path=str(repo_root), + ) + ) + hooks = load_repo_hooks(repo_root) + if hooks: + operations.append( + SyncOperation( + kind="evaluate_repo_hooks", + scope="shared_repo", + subject=repo_name, + target_path=str(repo_root), + reason="repo hook config present; sync must account for lifecycle/policy rules", + details={"hook_config": str(repo_root / ".gr2" / "hooks.toml")}, + ) + ) + + for owner_unit, lane_name, lane_doc in _iter_lane_docs(workspace_root): + if lane_doc.get("_load_error"): + issues.append( + SyncIssue( + level="error", + code="lane_doc_load_failed", + scope="lane", + subject=f"{owner_unit}/{lane_name}", + message=f"failed to load lane metadata: {lane_doc['_load_error']}", + blocks=True, + path=str(workspace_root / "agents" / owner_unit / "lanes" / lane_name / "lane.toml"), + ) + ) + continue + + lane_root = lane_proto.lane_dir(workspace_root, owner_unit, lane_name) + for repo_name in lane_doc.get("repos", []): + lane_repo_root = lane_root / "repos" / str(repo_name) + expected_branch = str(dict(lane_doc.get("branch_map", {})).get(repo_name, "")) + if not lane_repo_root.exists(): + operations.append( + SyncOperation( + kind="materialize_lane_repo", + scope="lane", + subject=f"{owner_unit}/{lane_name}:{repo_name}", + target_path=str(lane_repo_root), + reason="lane checkout missing", + details={"expected_branch": expected_branch}, + ) + ) + continue + if not is_git_repo(lane_repo_root): + issues.append( + SyncIssue( + level="error", + code="lane_repo_path_conflict", + scope="lane", + subject=f"{owner_unit}/{lane_name}:{repo_name}", + message=f"lane repo path exists but is not a git repo: {lane_repo_root}", + blocks=True, + path=str(lane_repo_root), + ) + ) + continue + if repo_dirty(lane_repo_root): + issues.append( + SyncIssue( + level="error", + code="dirty_lane_repo", + scope="lane", + 
subject=f"{owner_unit}/{lane_name}:{repo_name}", + message=f"lane repo has uncommitted changes and blocks sync: {lane_repo_root}", + blocks=True, + path=str(lane_repo_root), + details={"expected_branch": expected_branch}, + ) + ) + operations.append( + SyncOperation( + kind="inspect_lane_repo_branch", + scope="lane", + subject=f"{owner_unit}/{lane_name}:{repo_name}", + target_path=str(lane_repo_root), + reason="lane checkout present; verify branch alignment before any sync run", + details={"expected_branch": expected_branch}, + ) + ) + + return SyncPlan( + workspace_root=str(workspace_root), + spec_path=str(spec_path), + status=_status_from_issues(issues), + issues=issues, + operations=operations, + ) + + +def render_sync_plan(plan: SyncPlan) -> str: + lines = [ + "SyncPlan", + f"workspace_root = {plan.workspace_root}", + f"status = {plan.status}", + f"issue_count = {len(plan.issues)}", + f"operation_count = {len(plan.operations)}", + ] + if plan.issues: + lines.append("ISSUES") + for issue in plan.issues: + subject = f" [{issue.subject}]" if issue.subject else "" + lines.append(f"- {issue.level}:{issue.code}{subject} {issue.message}") + if plan.operations: + lines.append("OPERATIONS") + for op in plan.operations: + lines.append(f"- {op.kind} [{op.scope}] {op.subject} -> {op.target_path} ({op.reason})") + return "\n".join(lines) + + +def sync_status_payload(workspace_root: Path) -> dict[str, object]: + return build_sync_plan(workspace_root).as_dict() + + +def sync_status_json(workspace_root: Path) -> str: + return json.dumps(sync_status_payload(workspace_root), indent=2) From 669c74715e1c118ac5923898dbd8d860ccdbfad3 Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 10:33:28 -0500 Subject: [PATCH 02/18] feat: add python gr2 sync run executor --- gr2/python_cli/app.py | 16 +++++ gr2/python_cli/gitops.py | 7 ++ gr2/python_cli/syncops.py | 146 +++++++++++++++++++++++++++++++++++++- 3 files changed, 168 insertions(+), 1 deletion(-) diff --git 
a/gr2/python_cli/app.py b/gr2/python_cli/app.py index ae333f3..e8811aa 100644 --- a/gr2/python_cli/app.py +++ b/gr2/python_cli/app.py @@ -266,6 +266,22 @@ def sync_status( typer.echo(syncops.render_sync_plan(plan)) +@sync_app.command("run") +def sync_run( + workspace_root: Path, + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Execute the current sync plan, stopping on the first blocking runtime failure.""" + workspace_root = workspace_root.resolve() + result = syncops.run_sync(workspace_root) + if json_output: + typer.echo(json.dumps(result.as_dict(), indent=2)) + else: + typer.echo(syncops.render_sync_result(result)) + if result.status in {"blocked", "failed", "partial_failure"}: + raise typer.Exit(code=1) + + @workspace_app.command("init") def workspace_init( workspace_root: Path, diff --git a/gr2/python_cli/gitops.py b/gr2/python_cli/gitops.py index 8fca724..3aa3f26 100644 --- a/gr2/python_cli/gitops.py +++ b/gr2/python_cli/gitops.py @@ -145,6 +145,13 @@ def checkout_branch(repo_root: Path, branch: str) -> None: raise SystemExit(f"failed to checkout {branch} in {repo_root}:\n{proc.stderr or proc.stdout}") +def current_branch(repo_root: Path) -> str: + proc = git(repo_root, "branch", "--show-current") + if proc.returncode != 0: + raise SystemExit(f"failed to determine current branch in {repo_root}:\n{proc.stderr or proc.stdout}") + return proc.stdout.strip() + + def stash_if_dirty(repo_root: Path, message: str) -> bool: if not repo_dirty(repo_root): return False diff --git a/gr2/python_cli/syncops.py b/gr2/python_cli/syncops.py index 6beb364..33a7bd8 100644 --- a/gr2/python_cli/syncops.py +++ b/gr2/python_cli/syncops.py @@ -6,10 +6,13 @@ from gr2.prototypes import lane_workspace_prototype as lane_proto -from .gitops import is_git_dir, is_git_repo, repo_dirty +from .gitops import current_branch, ensure_lane_checkout, ensure_repo_cache, is_git_dir, is_git_repo, repo_dirty, clone_repo from .hooks import 
load_repo_hooks from .spec_apply import ( ValidationIssue, + _run_materialize_hooks, + _find_repo, + _record_apply_state, load_workspace_spec_doc, repo_cache_path, validate_spec, @@ -17,6 +20,12 @@ ) +SYNC_ROLLBACK_CONTRACT = ( + "sync preserves completed operations, stops on blocking failure, and reports partial state explicitly; " + "it does not attempt automatic cross-repo rollback" +) + + @dataclasses.dataclass(frozen=True) class SyncIssue: level: str @@ -354,3 +363,138 @@ def sync_status_payload(workspace_root: Path) -> dict[str, object]: def sync_status_json(workspace_root: Path) -> str: return json.dumps(sync_status_payload(workspace_root), indent=2) + + +def _issue_from_exception(op: SyncOperation, exc: BaseException) -> SyncIssue: + message = str(exc).strip() or f"sync operation failed: {op.kind}" + return SyncIssue( + level="error", + code=f"{op.kind}_failed", + scope=op.scope, + subject=op.subject, + message=message, + blocks=True, + path=op.target_path, + details={"operation": op.kind}, + ) + + +def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOperation) -> str: + if op.kind in {"seed_repo_cache", "refresh_repo_cache"}: + repo_spec = _find_repo(spec, op.subject) + cache_path = repo_cache_path(workspace_root, str(repo_spec["name"])) + created = ensure_repo_cache(str(repo_spec["url"]), cache_path) + if op.kind == "seed_repo_cache": + return f"seeded repo cache for '{op.subject}' at {cache_path}" + if created: + return f"seeded repo cache for '{op.subject}' at {cache_path}" + return f"refreshed repo cache for '{op.subject}' at {cache_path}" + + if op.kind == "clone_shared_repo": + repo_spec = _find_repo(spec, op.subject) + repo_root = workspace_root / str(repo_spec["path"]) + cache_path = repo_cache_path(workspace_root, str(repo_spec["name"])) + first_materialize = clone_repo(str(repo_spec["url"]), repo_root, reference_repo_root=cache_path) + _run_materialize_hooks(workspace_root, repo_root, str(repo_spec["name"]), 
first_materialize, manual_hooks=False) + return f"cloned shared repo '{op.subject}' into {repo_root}" + + if op.kind == "evaluate_repo_hooks": + repo_root = Path(op.target_path) + hooks = load_repo_hooks(repo_root) + if hooks: + return f"validated repo hooks for '{op.subject}'" + return f"no repo hooks for '{op.subject}'" + + if op.kind == "materialize_lane_repo": + owner_and_lane, repo_name = op.subject.split(":", 1) + owner_unit, lane_name = owner_and_lane.split("/", 1) + repo_spec = _find_repo(spec, repo_name) + source_repo_root = workspace_root / str(repo_spec["path"]) + target_repo_root = Path(op.target_path) + expected_branch = str(op.details.get("expected_branch", "")) + first_materialize = ensure_lane_checkout( + source_repo_root=source_repo_root, + target_repo_root=target_repo_root, + branch=expected_branch, + ) + _run_materialize_hooks(workspace_root, target_repo_root, repo_name, first_materialize, manual_hooks=False) + return f"materialized lane repo '{op.subject}' at {target_repo_root}" + + if op.kind == "inspect_lane_repo_branch": + expected_branch = str(op.details.get("expected_branch", "")).strip() + repo_root = Path(op.target_path) + actual_branch = current_branch(repo_root) + if expected_branch and actual_branch != expected_branch: + raise SystemExit( + f"lane repo branch mismatch for {op.subject}: expected {expected_branch}, found {actual_branch}" + ) + return f"verified lane branch for '{op.subject}' ({actual_branch or '-'})" + + raise SystemExit(f"unsupported sync operation kind: {op.kind}") + + +def run_sync(workspace_root: Path) -> SyncResult: + workspace_root = workspace_root.resolve() + plan = build_sync_plan(workspace_root) + blocked = [issue for issue in plan.issues if issue.blocks] + if blocked: + return SyncResult( + workspace_root=str(workspace_root), + status="blocked", + plan_status=plan.status, + applied=[], + blocked=blocked, + failures=[], + rollback_contract=SYNC_ROLLBACK_CONTRACT, + ) + + spec = 
load_workspace_spec_doc(workspace_root) + applied: list[str] = [] + failures: list[SyncIssue] = [] + for op in plan.operations: + try: + applied.append(_execute_operation(workspace_root, spec, op)) + except BaseException as exc: + failures.append(_issue_from_exception(op, exc)) + break + + if applied: + _record_apply_state(workspace_root, applied) + + status = "success" + if failures and applied: + status = "partial_failure" + elif failures: + status = "failed" + + return SyncResult( + workspace_root=str(workspace_root), + status=status, + plan_status=plan.status, + applied=applied, + blocked=[], + failures=failures, + rollback_contract=SYNC_ROLLBACK_CONTRACT, + ) + + +def render_sync_result(result: SyncResult) -> str: + lines = [ + "SyncResult", + f"workspace_root = {result.workspace_root}", + f"status = {result.status}", + f"plan_status = {result.plan_status}", + f"applied_count = {len(result.applied)}", + f"failure_count = {len(result.failures)}", + ] + if result.applied: + lines.append("APPLIED") + lines.extend(f"- {item}" for item in result.applied) + if result.blocked: + lines.append("BLOCKED") + lines.extend(f"- {item.code}: {item.message}" for item in result.blocked) + if result.failures: + lines.append("FAILURES") + lines.extend(f"- {item.code}: {item.message}" for item in result.failures) + lines.append(f"rollback_contract = {result.rollback_contract}") + return "\n".join(lines) From ddc81749bd19202fc5f59c00e4c1e2f3a94a3081 Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 10:43:39 -0500 Subject: [PATCH 03/18] feat: harden python gr2 sync semantics --- gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md | 8 +- gr2/docs/SYNC-FAILURE-CONTRACT.md | 20 +- gr2/python_cli/app.py | 8 +- gr2/python_cli/gitops.py | 20 ++ gr2/python_cli/syncops.py | 361 ++++++++++++++++++++++---- 5 files changed, 353 insertions(+), 64 deletions(-) diff --git a/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md b/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md index 82af582..4aa1527 100644 --- 
a/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md +++ b/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md @@ -157,8 +157,9 @@ This is the seam premium and QA will consume. ## 6. Sync Safety Rules -1. Dirty state wins over convenience. - If a repo is dirty, `sync` blocks instead of mutating through it. +1. Dirty state is explicit, not implicit. + `sync` accepts `--dirty=stash|block|discard`. + Default is `stash`, per Sprint 20 ruling. 2. Lanes are first-class. `sync` must treat shared repos and lane checkouts differently. @@ -171,6 +172,7 @@ This is the seam premium and QA will consume. 5. Event emission is part of correctness. `sync` must emit enough machine-readable state for premium spawn and QA. + Emit failure does not block the parent operation. ## 7. Proposed Command Shapes @@ -184,7 +186,7 @@ Possible later flags: - `--lane ` - `--owner-unit ` - `--refresh-prs` -- `--allow-dirty-stash` +- `--dirty=stash|block|discard` - `--json` `sync status` should be the dry-run/default read path. diff --git a/gr2/docs/SYNC-FAILURE-CONTRACT.md b/gr2/docs/SYNC-FAILURE-CONTRACT.md index 0a31180..885e541 100644 --- a/gr2/docs/SYNC-FAILURE-CONTRACT.md +++ b/gr2/docs/SYNC-FAILURE-CONTRACT.md @@ -54,17 +54,21 @@ Not: ## 4. Dirty State -Dirty state is a pre-execution blocker by default. +Dirty handling is explicit through `--dirty=stash|block|discard`. 
-If a shared repo or lane checkout is dirty: -- `sync status` returns `blocked` -- `sync run` must not mutate through that checkout unless the command explicitly - supports a later opt-in dirty-state strategy +Default: +- `--dirty=stash` -Initial Sprint 20 contract: -- no implicit stash +Behavior: +- `stash`: preserve local work by stashing it before sync mutation proceeds +- `block`: return a blocking dirty-state issue and do not mutate through that + checkout +- `discard`: explicitly discard local changes before sync mutation proceeds + +Rules: - no implicit commit -- no implicit reset +- no dirty-state behavior outside the declared `--dirty` mode +- `discard` is always explicit and never the default ## 5. Partial State Contract diff --git a/gr2/python_cli/app.py b/gr2/python_cli/app.py index e8811aa..be29f21 100644 --- a/gr2/python_cli/app.py +++ b/gr2/python_cli/app.py @@ -255,13 +255,14 @@ def _exit(code: int) -> None: @sync_app.command("status") def sync_status( workspace_root: Path, + dirty_mode: str = typer.Option("stash", "--dirty", help="Dirty-state handling: stash, block, or discard"), json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), ) -> None: """Inspect workspace-wide sync readiness without mutating any repo state.""" workspace_root = workspace_root.resolve() - plan = syncops.build_sync_plan(workspace_root) + plan = syncops.build_sync_plan(workspace_root, dirty_mode=dirty_mode) if json_output: - typer.echo(syncops.sync_status_json(workspace_root)) + typer.echo(json.dumps(plan.as_dict(), indent=2)) return typer.echo(syncops.render_sync_plan(plan)) @@ -269,11 +270,12 @@ def sync_status( @sync_app.command("run") def sync_run( workspace_root: Path, + dirty_mode: str = typer.Option("stash", "--dirty", help="Dirty-state handling: stash, block, or discard"), json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), ) -> None: """Execute the current sync plan, stopping on the first blocking 
runtime failure.""" workspace_root = workspace_root.resolve() - result = syncops.run_sync(workspace_root) + result = syncops.run_sync(workspace_root, dirty_mode=dirty_mode) if json_output: typer.echo(json.dumps(result.as_dict(), indent=2)) else: diff --git a/gr2/python_cli/gitops.py b/gr2/python_cli/gitops.py index 3aa3f26..8b10d9b 100644 --- a/gr2/python_cli/gitops.py +++ b/gr2/python_cli/gitops.py @@ -32,6 +32,14 @@ def remote_origin_url(path: Path) -> str | None: return value or None +def current_head_sha(path: Path) -> str | None: + proc = git(path, "rev-parse", "HEAD") + if proc.returncode != 0: + return None + value = proc.stdout.strip() + return value or None + + def ensure_repo_cache(url: str, cache_repo_root: Path) -> bool: """Ensure a local bare mirror exists for a repo URL. @@ -159,3 +167,15 @@ def stash_if_dirty(repo_root: Path, message: str) -> bool: if proc.returncode != 0: raise SystemExit(f"failed to stash dirty work in {repo_root}:\n{proc.stderr or proc.stdout}") return True + + +def discard_if_dirty(repo_root: Path) -> bool: + if not repo_dirty(repo_root): + return False + proc = git(repo_root, "reset", "--hard", "HEAD") + if proc.returncode != 0: + raise SystemExit(f"failed to discard tracked changes in {repo_root}:\n{proc.stderr or proc.stdout}") + proc = git(repo_root, "clean", "-fd") + if proc.returncode != 0: + raise SystemExit(f"failed to discard untracked changes in {repo_root}:\n{proc.stderr or proc.stdout}") + return True diff --git a/gr2/python_cli/syncops.py b/gr2/python_cli/syncops.py index 33a7bd8..d427ae5 100644 --- a/gr2/python_cli/syncops.py +++ b/gr2/python_cli/syncops.py @@ -1,12 +1,26 @@ from __future__ import annotations import dataclasses +import fcntl import json +import os from pathlib import Path +from datetime import UTC, datetime from gr2.prototypes import lane_workspace_prototype as lane_proto -from .gitops import current_branch, ensure_lane_checkout, ensure_repo_cache, is_git_dir, is_git_repo, repo_dirty, clone_repo 
+from .gitops import ( + clone_repo, + current_branch, + current_head_sha, + discard_if_dirty, + ensure_lane_checkout, + ensure_repo_cache, + is_git_dir, + is_git_repo, + repo_dirty, + stash_if_dirty, +) from .hooks import load_repo_hooks from .spec_apply import ( ValidationIssue, @@ -24,6 +38,7 @@ "sync preserves completed operations, stops on blocking failure, and reports partial state explicitly; " "it does not attempt automatic cross-repo rollback" ) +VALID_DIRTY_MODES = {"stash", "block", "discard"} @dataclasses.dataclass(frozen=True) @@ -83,6 +98,7 @@ class SyncResult: blocked: list[SyncIssue] failures: list[SyncIssue] rollback_contract: str + operation_id: str | None = None def as_dict(self) -> dict[str, object]: return { @@ -93,6 +109,7 @@ def as_dict(self) -> dict[str, object]: "blocked": [item.as_dict() for item in self.blocked], "failures": [item.as_dict() for item in self.failures], "rollback_contract": self.rollback_contract, + "operation_id": self.operation_id, } @@ -148,8 +165,79 @@ def _status_from_issues(issues: list[SyncIssue]) -> str: return "ready" -def build_sync_plan(workspace_root: Path) -> SyncPlan: +def _normalize_dirty_mode(dirty_mode: str) -> str: + normalized = dirty_mode.strip().lower() + if normalized not in VALID_DIRTY_MODES: + raise SystemExit(f"invalid --dirty value '{dirty_mode}'; expected one of: stash, block, discard") + return normalized + + +def _operation_id() -> str: + return os.urandom(8).hex() + + +def _now_utc() -> str: + return datetime.now(UTC).isoformat() + + +def _events_dir(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "events" + + +def _outbox_file(workspace_root: Path) -> Path: + return _events_dir(workspace_root) / "outbox.jsonl" + + +def _outbox_lock_file(workspace_root: Path) -> Path: + return _events_dir(workspace_root) / "outbox.lock" + + +def _sync_lock_file(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "state" / "sync.lock" + + +def 
_append_outbox_event(workspace_root: Path, payload: dict[str, object]) -> None: + outbox_path = _outbox_file(workspace_root) + lock_path = _outbox_lock_file(workspace_root) + outbox_path.parent.mkdir(parents=True, exist_ok=True) + lock_path.parent.mkdir(parents=True, exist_ok=True) + try: + with lock_path.open("a+", encoding="utf-8") as lock_fh: + fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX) + seq = 1 + if outbox_path.exists(): + with outbox_path.open("r", encoding="utf-8") as existing: + for line in existing: + line = line.strip() + if not line: + continue + try: + row = json.loads(line) + except json.JSONDecodeError: + continue + value = int(row.get("seq", 0)) + if value >= seq: + seq = value + 1 + event = { + "seq": seq, + "event_id": os.urandom(8).hex(), + "timestamp": _now_utc(), + **payload, + } + with outbox_path.open("a", encoding="utf-8") as fh: + fh.write(json.dumps(event) + "\n") + fcntl.flock(lock_fh.fileno(), fcntl.LOCK_UN) + except OSError: + return + + +def _emit_sync_event(workspace_root: Path, payload: dict[str, object]) -> None: + _append_outbox_event(workspace_root, payload) + + +def build_sync_plan(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncPlan: workspace_root = workspace_root.resolve() + dirty_mode = _normalize_dirty_mode(dirty_mode) spec_path = workspace_spec_path(workspace_root) if not spec_path.exists(): raise SystemExit( @@ -236,17 +324,30 @@ def build_sync_plan(workspace_root: Path) -> SyncPlan: ) else: if repo_dirty(repo_root): - issues.append( - SyncIssue( - level="error", - code="dirty_shared_repo", - scope="shared_repo", - subject=repo_name, - message=f"shared repo has uncommitted changes and blocks sync: {repo_root}", - blocks=True, - path=str(repo_root), + if dirty_mode == "block": + issues.append( + SyncIssue( + level="error", + code="dirty_shared_repo", + scope="shared_repo", + subject=repo_name, + message=f"shared repo has uncommitted changes and blocks sync: {repo_root}", + blocks=True, + path=str(repo_root), + 
details={"dirty_mode": dirty_mode}, + ) + ) + else: + operations.append( + SyncOperation( + kind="stash_dirty_repo" if dirty_mode == "stash" else "discard_dirty_repo", + scope="shared_repo", + subject=repo_name, + target_path=str(repo_root), + reason=f"shared repo is dirty and will be handled via --dirty={dirty_mode}", + details={"dirty_mode": dirty_mode}, + ) ) - ) hooks = load_repo_hooks(repo_root) if hooks: operations.append( @@ -305,18 +406,30 @@ def build_sync_plan(workspace_root: Path) -> SyncPlan: ) continue if repo_dirty(lane_repo_root): - issues.append( - SyncIssue( - level="error", - code="dirty_lane_repo", - scope="lane", - subject=f"{owner_unit}/{lane_name}:{repo_name}", - message=f"lane repo has uncommitted changes and blocks sync: {lane_repo_root}", - blocks=True, - path=str(lane_repo_root), - details={"expected_branch": expected_branch}, + if dirty_mode == "block": + issues.append( + SyncIssue( + level="error", + code="dirty_lane_repo", + scope="lane", + subject=f"{owner_unit}/{lane_name}:{repo_name}", + message=f"lane repo has uncommitted changes and blocks sync: {lane_repo_root}", + blocks=True, + path=str(lane_repo_root), + details={"expected_branch": expected_branch, "dirty_mode": dirty_mode}, + ) + ) + else: + operations.append( + SyncOperation( + kind="stash_dirty_repo" if dirty_mode == "stash" else "discard_dirty_repo", + scope="lane", + subject=f"{owner_unit}/{lane_name}:{repo_name}", + target_path=str(lane_repo_root), + reason=f"lane repo is dirty and will be handled via --dirty={dirty_mode}", + details={"expected_branch": expected_branch, "dirty_mode": dirty_mode}, + ) ) - ) operations.append( SyncOperation( kind="inspect_lane_repo_branch", @@ -380,6 +493,8 @@ def _issue_from_exception(op: SyncOperation, exc: BaseException) -> SyncIssue: def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOperation) -> str: + target_path = Path(op.target_path) + before_sha = current_head_sha(target_path) if op.scope in 
{"shared_repo", "lane"} and target_path.exists() else None if op.kind in {"seed_repo_cache", "refresh_repo_cache"}: repo_spec = _find_repo(spec, op.subject) cache_path = repo_cache_path(workspace_root, str(repo_spec["name"])) @@ -396,6 +511,17 @@ def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOp cache_path = repo_cache_path(workspace_root, str(repo_spec["name"])) first_materialize = clone_repo(str(repo_spec["url"]), repo_root, reference_repo_root=cache_path) _run_materialize_hooks(workspace_root, repo_root, str(repo_spec["name"]), first_materialize, manual_hooks=False) + after_sha = current_head_sha(repo_root) + _emit_sync_event( + workspace_root, + { + "type": "sync.repo_updated", + "repo": op.subject, + "scope": "shared_repo", + "old_sha": before_sha, + "new_sha": after_sha, + }, + ) return f"cloned shared repo '{op.subject}' into {repo_root}" if op.kind == "evaluate_repo_hooks": @@ -418,6 +544,20 @@ def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOp branch=expected_branch, ) _run_materialize_hooks(workspace_root, target_repo_root, repo_name, first_materialize, manual_hooks=False) + after_sha = current_head_sha(target_repo_root) + _emit_sync_event( + workspace_root, + { + "type": "sync.repo_updated", + "repo": repo_name, + "scope": "lane", + "owner_unit": owner_unit, + "lane": lane_name, + "old_sha": before_sha, + "new_sha": after_sha, + "branch": expected_branch, + }, + ) return f"materialized lane repo '{op.subject}' at {target_repo_root}" if op.kind == "inspect_lane_repo_branch": @@ -430,14 +570,117 @@ def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOp ) return f"verified lane branch for '{op.subject}' ({actual_branch or '-'})" + if op.kind == "stash_dirty_repo": + repo_root = Path(op.target_path) + if stash_if_dirty(repo_root, f"gr2 sync auto-stash: {op.subject}"): + _emit_sync_event( + workspace_root, + { + "type": "sync.repo_skipped", + "repo": 
op.subject.split(":")[-1], + "scope": op.scope, + "reason": "dirty_stashed", + }, + ) + return f"stashed dirty repo state for '{op.subject}'" + return f"repo already clean for '{op.subject}'" + + if op.kind == "discard_dirty_repo": + repo_root = Path(op.target_path) + if discard_if_dirty(repo_root): + _emit_sync_event( + workspace_root, + { + "type": "sync.repo_skipped", + "repo": op.subject.split(":")[-1], + "scope": op.scope, + "reason": "dirty_discarded", + }, + ) + return f"discarded dirty repo state for '{op.subject}'" + return f"repo already clean for '{op.subject}'" + raise SystemExit(f"unsupported sync operation kind: {op.kind}") -def run_sync(workspace_root: Path) -> SyncResult: +def _acquire_sync_lock(workspace_root: Path): + lock_path = _sync_lock_file(workspace_root) + lock_path.parent.mkdir(parents=True, exist_ok=True) + lock_fh = lock_path.open("a+", encoding="utf-8") + try: + fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB) + except BlockingIOError: + lock_fh.close() + return None + return lock_fh + + +def _release_sync_lock(lock_fh) -> None: + if lock_fh is None: + return + fcntl.flock(lock_fh.fileno(), fcntl.LOCK_UN) + lock_fh.close() + + +def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: workspace_root = workspace_root.resolve() - plan = build_sync_plan(workspace_root) + dirty_mode = _normalize_dirty_mode(dirty_mode) + operation_id = _operation_id() + lock_fh = _acquire_sync_lock(workspace_root) + if lock_fh is None: + blocked_issue = SyncIssue( + level="error", + code="sync_lock_held", + scope="workspace", + subject=str(workspace_root), + message="another sync run currently holds the workspace lock", + blocks=True, + path=str(_sync_lock_file(workspace_root)), + details={"operation_id": operation_id}, + ) + _emit_sync_event( + workspace_root, + { + "type": "sync.conflict", + "operation_id": operation_id, + "reason": "lock_held", + "workspace_root": str(workspace_root), + }, + ) + return SyncResult( + 
workspace_root=str(workspace_root), + status="blocked", + plan_status="blocked", + applied=[], + blocked=[blocked_issue], + failures=[], + rollback_contract=SYNC_ROLLBACK_CONTRACT, + operation_id=operation_id, + ) + + _emit_sync_event( + workspace_root, + { + "type": "sync.started", + "operation_id": operation_id, + "workspace_root": str(workspace_root), + "dirty_mode": dirty_mode, + }, + ) + plan = build_sync_plan(workspace_root, dirty_mode=dirty_mode) blocked = [issue for issue in plan.issues if issue.blocks] if blocked: + _emit_sync_event( + workspace_root, + { + "type": "sync.failed", + "operation_id": operation_id, + "workspace_root": str(workspace_root), + "status": "blocked", + "blocked_codes": [item.code for item in blocked], + }, + ) + _release_sync_lock(lock_fh) return SyncResult( workspace_root=str(workspace_root), status="blocked", @@ -446,36 +689,53 @@ def run_sync(workspace_root: Path) -> SyncResult: blocked=blocked, failures=[], rollback_contract=SYNC_ROLLBACK_CONTRACT, + operation_id=operation_id, ) spec = load_workspace_spec_doc(workspace_root) applied: list[str] = [] failures: list[SyncIssue] = [] - for op in plan.operations: - try: - applied.append(_execute_operation(workspace_root, spec, op)) - except BaseException as exc: - failures.append(_issue_from_exception(op, exc)) - break - - if applied: - _record_apply_state(workspace_root, applied) - - status = "success" - if failures and applied: - status = "partial_failure" - elif failures: - status = "failed" - - return SyncResult( - workspace_root=str(workspace_root), - status=status, - plan_status=plan.status, - applied=applied, - blocked=[], - failures=failures, - rollback_contract=SYNC_ROLLBACK_CONTRACT, - ) + try: + for op in plan.operations: + try: + applied.append(_execute_operation(workspace_root, spec, op)) + except BaseException as exc: + failures.append(_issue_from_exception(op, exc)) + break + + if applied: + _record_apply_state(workspace_root, applied) + + status = "success" + if 
failures and applied: + status = "partial_failure" + elif failures: + status = "failed" + + _emit_sync_event( + workspace_root, + { + "type": "sync.completed" if status == "success" else "sync.failed", + "operation_id": operation_id, + "workspace_root": str(workspace_root), + "status": status, + "applied_count": len(applied), + "failure_codes": [item.code for item in failures], + }, + ) + + return SyncResult( + workspace_root=str(workspace_root), + status=status, + plan_status=plan.status, + applied=applied, + blocked=[], + failures=failures, + rollback_contract=SYNC_ROLLBACK_CONTRACT, + operation_id=operation_id, + ) + finally: + _release_sync_lock(lock_fh) def render_sync_result(result: SyncResult) -> str: @@ -484,6 +744,7 @@ def render_sync_result(result: SyncResult) -> str: f"workspace_root = {result.workspace_root}", f"status = {result.status}", f"plan_status = {result.plan_status}", + f"operation_id = {result.operation_id or '-'}", f"applied_count = {len(result.applied)}", f"failure_count = {len(result.failures)}", ] From 150d4a68a6a2ae0abe449983e6f9c09e1c80108a Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 10:58:02 -0500 Subject: [PATCH 04/18] fix: align sync events and blockers --- gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md | 6 +++ gr2/docs/SYNC-FAILURE-CONTRACT.md | 2 + gr2/python_cli/syncops.py | 78 ++++++++++++++++++++++++++- 3 files changed, 84 insertions(+), 2 deletions(-) diff --git a/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md b/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md index 4aa1527..578fd9c 100644 --- a/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md +++ b/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md @@ -92,6 +92,7 @@ These are not optional polish. They are spawn prerequisites. - lane-aware - explicit about what it mutates - resumable after partial failure +- explicit about lease-blocked lanes ## 5. Sync Phases @@ -174,6 +175,11 @@ This is the seam premium and QA will consume. `sync` must emit enough machine-readable state for premium spawn and QA. 
Emit failure does not block the parent operation. +6. Terminal sync state is normalized. + `sync.completed` is the terminal event for success, blocked, failed, and + partial-failure outcomes. Intermediate contention may still emit + `sync.conflict`. + ## 7. Proposed Command Shapes Initial surfaces: diff --git a/gr2/docs/SYNC-FAILURE-CONTRACT.md b/gr2/docs/SYNC-FAILURE-CONTRACT.md index 885e541..b368745 100644 --- a/gr2/docs/SYNC-FAILURE-CONTRACT.md +++ b/gr2/docs/SYNC-FAILURE-CONTRACT.md @@ -135,6 +135,8 @@ If sync encounters an active conflicting lease: - it is a blocker, not a warning - sync does not override or steal the lease - result points to the owning actor and lease mode when available +- `sync.conflict` is emitted with the blocking lease metadata +- terminal state still arrives through `sync.completed` with `status = "blocked"` If a stale lease policy is added later, it must be explicit and separately authorized. It is not part of the default sync contract. diff --git a/gr2/python_cli/syncops.py b/gr2/python_cli/syncops.py index d427ae5..1dea899 100644 --- a/gr2/python_cli/syncops.py +++ b/gr2/python_cli/syncops.py @@ -74,6 +74,8 @@ class SyncPlan: workspace_root: str spec_path: str status: str + dirty_mode: str + dirty_targets: list[str] issues: list[SyncIssue] operations: list[SyncOperation] @@ -82,6 +84,8 @@ def as_dict(self) -> dict[str, object]: "workspace_root": self.workspace_root, "spec_path": self.spec_path, "status": self.status, + "dirty_mode": self.dirty_mode, + "dirty_targets": list(self.dirty_targets), "issue_count": len(self.issues), "operation_count": len(self.operations), "issues": [item.as_dict() for item in self.issues], @@ -94,6 +98,8 @@ class SyncResult: workspace_root: str status: str plan_status: str + dirty_mode: str + dirty_targets: list[str] applied: list[str] blocked: list[SyncIssue] failures: list[SyncIssue] @@ -105,6 +111,8 @@ def as_dict(self) -> dict[str, object]: "workspace_root": self.workspace_root, "status": self.status, 
"plan_status": self.plan_status, + "dirty_mode": self.dirty_mode, + "dirty_targets": list(self.dirty_targets), "applied": list(self.applied), "blocked": [item.as_dict() for item in self.blocked], "failures": [item.as_dict() for item in self.failures], @@ -165,6 +173,17 @@ def _status_from_issues(issues: list[SyncIssue]) -> str: return "ready" +def _dirty_targets(issues: list[SyncIssue], operations: list[SyncOperation]) -> list[str]: + targets: list[str] = [] + for issue in issues: + if issue.code in {"dirty_shared_repo", "dirty_lane_repo"}: + targets.append(issue.subject) + for op in operations: + if op.kind in {"stash_dirty_repo", "discard_dirty_repo"}: + targets.append(op.subject) + return sorted(dict.fromkeys(targets)) + + def _normalize_dirty_mode(dirty_mode: str) -> str: normalized = dirty_mode.strip().lower() if normalized not in VALID_DIRTY_MODES: @@ -254,6 +273,8 @@ def build_sync_plan(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncP workspace_root=str(workspace_root), spec_path=str(spec_path), status=_status_from_issues(issues), + dirty_mode=dirty_mode, + dirty_targets=[], issues=issues, operations=operations, ) @@ -377,6 +398,30 @@ def build_sync_plan(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncP continue lane_root = lane_proto.lane_dir(workspace_root, owner_unit, lane_name) + active_leases = [ + lease + for lease in lane_proto.load_lane_leases(workspace_root, owner_unit, lane_name) + if not lane_proto.is_stale_lease(lease) + ] + if active_leases: + issues.append( + SyncIssue( + level="error", + code="lease_blocked_sync", + scope="lane", + subject=f"{owner_unit}/{lane_name}", + message=f"lane has active leases that block sync mutation: {owner_unit}/{lane_name}", + blocks=True, + path=str(workspace_root / "agents" / owner_unit / "lanes" / lane_name), + details={ + "leases": [ + {"actor": lease["actor"], "mode": lease["mode"], "acquired_at": lease["acquired_at"]} + for lease in active_leases + ] + }, + ) + ) + for repo_name in 
lane_doc.get("repos", []): lane_repo_root = lane_root / "repos" / str(repo_name) expected_branch = str(dict(lane_doc.get("branch_map", {})).get(repo_name, "")) @@ -445,6 +490,8 @@ def build_sync_plan(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncP workspace_root=str(workspace_root), spec_path=str(spec_path), status=_status_from_issues(issues), + dirty_mode=dirty_mode, + dirty_targets=_dirty_targets(issues, operations), issues=issues, operations=operations, ) @@ -455,9 +502,13 @@ def render_sync_plan(plan: SyncPlan) -> str: "SyncPlan", f"workspace_root = {plan.workspace_root}", f"status = {plan.status}", + f"dirty_mode = {plan.dirty_mode}", f"issue_count = {len(plan.issues)}", f"operation_count = {len(plan.operations)}", ] + if plan.dirty_targets: + lines.append("DIRTY_TARGETS") + lines.extend(f"- {item}" for item in plan.dirty_targets) if plan.issues: lines.append("ISSUES") for issue in plan.issues: @@ -651,6 +702,8 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: workspace_root=str(workspace_root), status="blocked", plan_status="blocked", + dirty_mode=dirty_mode, + dirty_targets=[], applied=[], blocked=[blocked_issue], failures=[], @@ -670,10 +723,23 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: plan = build_sync_plan(workspace_root, dirty_mode=dirty_mode) blocked = [issue for issue in plan.issues if issue.blocks] if blocked: + for issue in blocked: + if issue.code == "lease_blocked_sync": + _emit_sync_event( + workspace_root, + { + "type": "sync.conflict", + "operation_id": operation_id, + "workspace_root": str(workspace_root), + "reason": "active_lease", + "subject": issue.subject, + "leases": issue.details.get("leases", []), + }, + ) _emit_sync_event( workspace_root, { - "type": "sync.failed", + "type": "sync.completed", "operation_id": operation_id, "workspace_root": str(workspace_root), "status": "blocked", @@ -685,6 +751,8 @@ def run_sync(workspace_root: Path, *, dirty_mode: 
str = "stash") -> SyncResult: workspace_root=str(workspace_root), status="blocked", plan_status=plan.status, + dirty_mode=dirty_mode, + dirty_targets=list(plan.dirty_targets), applied=[], blocked=blocked, failures=[], @@ -715,7 +783,7 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: _emit_sync_event( workspace_root, { - "type": "sync.completed" if status == "success" else "sync.failed", + "type": "sync.completed", "operation_id": operation_id, "workspace_root": str(workspace_root), "status": status, @@ -728,6 +796,8 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: workspace_root=str(workspace_root), status=status, plan_status=plan.status, + dirty_mode=dirty_mode, + dirty_targets=list(plan.dirty_targets), applied=applied, blocked=[], failures=failures, @@ -744,10 +814,14 @@ def render_sync_result(result: SyncResult) -> str: f"workspace_root = {result.workspace_root}", f"status = {result.status}", f"plan_status = {result.plan_status}", + f"dirty_mode = {result.dirty_mode}", f"operation_id = {result.operation_id or '-'}", f"applied_count = {len(result.applied)}", f"failure_count = {len(result.failures)}", ] + if result.dirty_targets: + lines.append("DIRTY_TARGETS") + lines.extend(f"- {item}" for item in result.dirty_targets) if result.applied: lines.append("APPLIED") lines.extend(f"- {item}" for item in result.applied) From 03c190536d94df8793446ab81e8e080616856e64 Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 11:36:56 -0500 Subject: [PATCH 05/18] fix: align sync failure contract and interrupt handling --- gr2/docs/SYNC-FAILURE-CONTRACT.md | 4 ++-- gr2/python_cli/syncops.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/gr2/docs/SYNC-FAILURE-CONTRACT.md b/gr2/docs/SYNC-FAILURE-CONTRACT.md index b368745..eefc569 100644 --- a/gr2/docs/SYNC-FAILURE-CONTRACT.md +++ b/gr2/docs/SYNC-FAILURE-CONTRACT.md @@ -81,9 +81,9 @@ If `sync run` partially succeeds: - failed operations 
- unaffected operations, if known - event outbox must include: - - `sync_started` + - `sync.started` - one event per completed mutation - - `sync_failed` + - terminal `sync.completed` with a `status` field describing the outcome Consumers must be able to reconstruct: - what changed diff --git a/gr2/python_cli/syncops.py b/gr2/python_cli/syncops.py index 1dea899..664d82a 100644 --- a/gr2/python_cli/syncops.py +++ b/gr2/python_cli/syncops.py @@ -767,7 +767,7 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: for op in plan.operations: try: applied.append(_execute_operation(workspace_root, spec, op)) - except BaseException as exc: + except Exception as exc: failures.append(_issue_from_exception(op, exc)) break From e43c019ebfba6e0d1bfad8a337dd67f072c9a8c2 Mon Sep 17 00:00:00 2001 From: Layne Penney Date: Wed, 15 Apr 2026 12:08:38 -0500 Subject: [PATCH 06/18] design: hook/event contract + PR lifecycle for gr2 (#572) Co-authored-by: Claude Opus 4.6 --- gr2/docs/HOOK-EVENT-CONTRACT.md | 811 ++++++++++++++++++++++++++++++++ gr2/docs/PR-LIFECYCLE.md | 582 +++++++++++++++++++++++ 2 files changed, 1393 insertions(+) create mode 100644 gr2/docs/HOOK-EVENT-CONTRACT.md create mode 100644 gr2/docs/PR-LIFECYCLE.md diff --git a/gr2/docs/HOOK-EVENT-CONTRACT.md b/gr2/docs/HOOK-EVENT-CONTRACT.md new file mode 100644 index 0000000..0889be0 --- /dev/null +++ b/gr2/docs/HOOK-EVENT-CONTRACT.md @@ -0,0 +1,811 @@ +# gr2 Hook/Event Contract + +This document defines the event contract for gr2: what events the system emits, +their schema, delivery model, and how consumers (spawn, recall, channel bridge) +integrate. + +This is a **design document** for Sprint 20. It does not describe current +behavior; it defines the target contract. + +## 1. Design Goals + +- Every gr2 operation that changes workspace state emits a typed event. +- Events are durable, append-only, and replayable. +- Consumers read events at their own pace via cursors. 
gr2 does not block on + delivery. +- The event schema is the stable API between OSS gr2 and premium spawn. +- Hook execution is one event source among several, not the only one. + +## 2. Event Sources + +gr2 emits events from five operational domains: + +| Domain | Examples | Current State | +|--------|----------|---------------| +| **Lane lifecycle** | lane.created, lane.entered, lane.exited, lane.archived | Partial (SYNAPT-INTEGRATION.md defines format, not wired) | +| **Lease lifecycle** | lease.acquired, lease.released, lease.expired, lease.force_broken | Prototype only | +| **Hook execution** | hook.started, hook.completed, hook.failed | hooks.py runs commands but emits nothing | +| **PR lifecycle** | pr.created, pr.status_changed, pr.merged, pr.checks_passed | Missing (Sprint 20 deliverable) | +| **Sync operations** | sync.started, sync.repo_updated, sync.completed, sync.conflict | Missing (Atlas's sync algorithm design) | + +Each domain owns a namespace prefix. Events are globally ordered by timestamp +and monotonic sequence number within the outbox. + +## 3. Event Schema + +### 3.1 Common Envelope + +Every event is a single flat JSON object. Envelope fields and domain-specific +fields sit at the same level. There is no nested `payload` wrapper. + +```json +{ + "version": 1, + "event_id": "a1b2c3d4e5f67890", + "seq": 42, + "timestamp": "2026-04-15T16:30:00+00:00", + "type": "lane.entered", + "workspace": "synapt-dev", + "actor": "agent:apollo", + "agent_id": "agent_apollo_xyz789", + "owner_unit": "apollo", + "lane_name": "feat/hook-events", + "lane_type": "feature", + "repos": ["grip", "synapt"] +} +``` + +This flat shape matches Atlas's sync outbox implementation (`syncops.py`), where +`_append_outbox_event` spreads caller-provided fields into the envelope via +`{**envelope, **payload}`. Consumers read domain fields directly from the +top-level object without unwrapping a nested payload. 
+ +**Envelope fields** (added automatically by the emit function): + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `version` | int | yes | Schema version. Always `1` for this contract. | +| `event_id` | string | yes | Unique event identifier. 16-char hex from `os.urandom(8).hex()`. | +| `seq` | int | yes | Monotonically increasing sequence number within this outbox file. Starts at 1. | +| `timestamp` | string | yes | ISO 8601 with timezone. | +| `type` | string | yes | Dotted event type from the taxonomy (section 3.2). | + +**Context fields** (provided by the caller, required unless noted): + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `workspace` | string | yes | Workspace name from WorkspaceSpec. | +| `actor` | string | yes | Who triggered the event. Format: `agent:`, `human:`, or `system`. | +| `agent_id` | string | no | Persistent agent identity from premium. Opaque in OSS. | +| `owner_unit` | string | yes | Unit that owns the context where this event occurred. | + +**Domain fields** vary by event type. See section 3.2 for the fields each event +type carries. Domain fields are top-level keys alongside envelope and context +fields. + +Rules: +- `event_id` must be unique within a workspace. +- `seq` must be strictly monotonically increasing within a single outbox file. +- `actor` uses the prefix convention to distinguish agents from humans from + automated operations. +- `agent_id` is optional because human-triggered and system-triggered events + do not have one. +- Domain field names must not collide with envelope or context field names. + The reserved names are: `version`, `event_id`, `seq`, `timestamp`, `type`, + `workspace`, `actor`, `agent_id`, `owner_unit`. 
+ +### 3.2 Event Type Taxonomy + +#### Lane Lifecycle + +| Type | Trigger | Payload | +|------|---------|---------| +| `lane.created` | `gr2 lane create` | `{lane_name, lane_type, repos: [str], branch_map: {repo: branch}}` | +| `lane.entered` | `gr2 lane enter` | `{lane_name, lane_type, repos: [str]}` | +| `lane.exited` | `gr2 lane exit` | `{lane_name, stashed_repos: [str]}` | +| `lane.switched` | Enter a different lane (exit + enter) | `{from_lane, to_lane, stashed_repos: [str]}` | +| `lane.archived` | Lane cleanup after merge | `{lane_name, reason}` | + +#### Lease Lifecycle + +| Type | Trigger | Payload | +|------|---------|---------| +| `lease.acquired` | `gr2 lane lease acquire` | `{lane_name, mode, ttl_seconds, lease_id}` | +| `lease.released` | `gr2 lane lease release` | `{lane_name, lease_id}` | +| `lease.expired` | TTL watchdog or next acquire check | `{lane_name, lease_id, expired_at}` | +| `lease.force_broken` | `--force` acquire or admin break | `{lane_name, lease_id, broken_by, reason}` | + +#### Hook Execution + +| Type | Trigger | Payload | +|------|---------|---------| +| `hook.started` | Lifecycle hook begins execution | `{stage, hook_name, repo, command, cwd}` | +| `hook.completed` | Hook exits successfully | `{stage, hook_name, repo, duration_ms, exit_code: 0}` | +| `hook.failed` | Hook exits with non-zero code | `{stage, hook_name, repo, duration_ms, exit_code, on_failure, stderr_tail}` | +| `hook.skipped` | Hook `when` condition not met | `{stage, hook_name, repo, reason}` | + +Rules for hook events: +- `stderr_tail` is the last 500 bytes of stderr, truncated. Full output is not + stored in the event. +- `on_failure` records the policy that was applied (block, warn, skip). +- `hook.failed` with `on_failure: "block"` means the parent operation also + failed. Consumers should expect a corresponding operation failure event. 
+ +#### PR Lifecycle + +| Type | Trigger | Payload | +|------|---------|---------| +| `pr.created` | `gr2 pr create` | `{pr_group_id, repos: [{repo, pr_number, url, title, base, head}]}` | +| `pr.status_changed` | Poll or webhook | `{pr_group_id, repo, pr_number, old_status, new_status}` | +| `pr.checks_passed` | All CI checks green | `{pr_group_id, repo, pr_number}` | +| `pr.checks_failed` | CI check failure | `{pr_group_id, repo, pr_number, failed_checks: [str]}` | +| `pr.review_submitted` | Review posted | `{pr_group_id, repo, pr_number, reviewer, verdict}` | +| `pr.merged` | `gr2 pr merge` | `{pr_group_id, repos: [{repo, pr_number, merge_sha}]}` | +| `pr.merge_failed` | Merge blocked or conflict | `{pr_group_id, repo, pr_number, reason}` | + +`pr_group_id` is the cross-repo correlation key. When `gr2 pr create` creates +PRs in multiple repos, they share the same `pr_group_id`. This is how consumers +reconstruct the cross-repo PR as a unit. + +**Boundary**: `pr_group_id` is assigned by gr2's orchestration layer (`pr.py`), +not by PlatformAdapter. PlatformAdapter is group-unaware: it creates, queries, +and merges individual per-repo PRs. The `pr.py` module correlates them into a +group and assigns the `pg_` prefixed ID. This keeps platform adapters simple and +reusable across contexts that may not need grouping. + +#### Sync Operations + +| Type | Trigger | Payload | +|------|---------|---------| +| `sync.started` | `gr2 sync` begins | `{repos: [str], strategy}` | +| `sync.repo_updated` | Single repo pull/rebase completes | `{repo, old_sha, new_sha, strategy, commits_pulled: int}` | +| `sync.repo_skipped` | Repo skipped (dirty, no remote, etc.) | `{repo, reason}` | +| `sync.conflict` | Merge/rebase conflict during sync | `{repo, conflicting_files: [str]}` | +| `sync.completed` | `gr2 sync` finishes | `{status, repos_updated: int, repos_skipped: int, repos_failed: int, duration_ms}` | + +`sync.completed` is the **single terminal event** for sync operations. 
There is no
+separate `sync.failed` type. The `status` field distinguishes outcomes:
+
+| `status` value | Meaning |
+|----------------|---------|
+| `success` | All repos updated without error. |
+| `partial_failure` | Some repos updated, some failed. `repos_failed > 0`. |
+| `blocked` | Sync could not proceed (e.g., unresolved failure marker). |
+| `failed` | All repos failed or sync aborted early. |
+
+This matches Atlas's `syncops.py` pattern, which uses `sync.completed` with a
+status field rather than emitting a separate `sync.failed` event type.
+
+#### Recovery
+
+| Type | Trigger | Payload |
+|------|---------|---------|
+| `failure.resolved` | `gr2 lane resolve <operation_id>` | `{operation_id, resolved_by, resolution, lane_name}` |
+| `lease.reclaimed` | Stale lease garbage-collected during acquire | `{lane_name, lease_id, previous_holder, expired_at, reclaimed_by}` |
+
+`failure.resolved` is emitted when an agent explicitly clears a failure marker
+(section 14.1). `lease.reclaimed` is emitted when a stale lease is
+garbage-collected during a new acquire (section 14.2, steps 7-8). This is
+distinct from `lease.expired` (which fires at the point of staleness detection)
+and `lease.force_broken` (which fires when a live lease is broken with
+`--force`).
+
+#### Workspace Operations
+
+| Type | Trigger | Payload |
+|------|---------|---------|
+| `workspace.materialized` | `gr2 workspace materialize` or `gr2 apply` | `{repos: [{repo, first_materialize: bool}]}` |
+| `workspace.file_projected` | File link/copy applied | `{repo, kind, src, dest}` |
+
+### 3.3 Payload Conventions
+
+- All paths in payloads are relative to `workspace_root`, never absolute.
+- Repo names match `WorkspaceSpec` `[[repos]]` names, not filesystem paths.
+- SHA values are full 40-char hex.
+- Duration values are in milliseconds as integers.
+- String arrays are used for repo lists, file lists, etc. Never comma-separated strings.
+
+## 4.
Event Outbox
+
+### 4.1 Storage
+
+Events are written to a single append-only JSONL file:
+
+```
+.grip/events/outbox.jsonl
+```
+
+One JSON object per line. No trailing commas. No array wrapper.
+
+The outbox file is the single source of truth for all gr2 events in a workspace.
+
+### 4.2 Write Path
+
+Events are written synchronously at the point of state change:
+
+1. Operation performs its work (e.g., creates a lane, runs a hook).
+2. Operation calls `emit(event_type, workspace_root, actor, owner_unit, payload)`.
+3. `emit()` assigns `event_id`, `seq`, `timestamp`.
+4. Event is serialized and appended to `outbox.jsonl`.
+5. File is flushed (fsync not required; OS page cache is sufficient for
+   local-only delivery).
+
+`seq` is derived from the last assigned value plus one — equivalently, the
+line count of the current outbox file plus the starting offset carried over at
+the last rotation (section 4.3), so `seq` stays monotonic across rotated files.
+This
+is safe because gr2 operations are single-process. If concurrent writers become
+necessary (multiple agents in the same workspace), `seq` assignment must move to
+a lock or use a separate sequence file.
+
+### 4.3 Rotation
+
+When `outbox.jsonl` exceeds 10 MB:
+
+1. Rename to `outbox.{timestamp}.jsonl`.
+2. Create new empty `outbox.jsonl` with `seq` continuing from the last value.
+3. Old files are retained for 7 days, then eligible for cleanup by `gr2 gc`.
+
+Consumers must handle rotation by checking for new files when their cursor
+points past the end of the current file.
+
+### 4.4 No Deletion
+
+Events are never deleted from the outbox. They are append-only. Rotation moves
+old events to archived files but does not remove them. `gr2 gc` is the only
+operation that removes archived event files, and only after the retention period.
+
+## 5.
Consumer Model + +### 5.1 Cursor-Based Reading + +Each consumer maintains a cursor file in `.grip/events/cursors/`: + +``` +.grip/events/cursors/{consumer_name}.json +``` + +Cursor format: + +```json +{ + "consumer": "channel_bridge", + "last_seq": 41, + "last_event_id": "a1b2c3d4e5f67890", + "last_read": "2026-04-15T16:31:00+00:00" +} +``` + +Reading flow: + +1. Consumer opens cursor file (or starts at seq 0 if no cursor exists). +2. Consumer reads `outbox.jsonl` from line `last_seq + 1` forward. +3. Consumer processes each event. +4. Consumer updates cursor atomically (write temp file, rename). + +### 5.2 Known Consumers + +| Consumer | Location | What It Does | +|----------|----------|--------------| +| **channel_bridge** | OSS | Derives `#dev`-style notifications from events. Posts to channel transport. | +| **recall_indexer** | OSS | Indexes events into recall for searchable lane/activity history. | +| **spawn_watcher** | Premium | Watches for events that trigger agent orchestration (lane assignments, PR readiness, hook failures). | + +### 5.3 Consumer Contract + +Consumers must: +- Be idempotent. Re-processing the same event (e.g., after a crash before + cursor update) must produce the same result. +- Use `event_id` for deduplication if their target store does not naturally + deduplicate. +- Not modify or delete events in the outbox. +- Handle unknown event types gracefully (skip, log, do not crash). +- Handle schema version bumps by checking `version` and ignoring events with + a version they do not understand. + +### 5.4 Spawn Integration (Premium) + +Spawn is the premium consumer that orchestrates multi-agent workflows. It +consumes the same outbox as OSS consumers but interprets events through the +lens of org policy and agent identity. + +Events that spawn cares about: + +| Event | Spawn Reaction | +|-------|----------------| +| `lane.created` | May assign agent to lane based on policy. 
| +| `pr.created` | May assign reviewers based on compiled review requirements. | +| `pr.checks_passed` | May trigger merge if auto-merge policy is active. | +| `pr.checks_failed` | May notify owning agent or escalate. | +| `hook.failed` with `on_failure: "block"` | May retry, reassign, or alert. | +| `lease.expired` | May reclaim the lane or notify the agent. | +| `sync.conflict` | May pause agent work on conflicting repos. | + +Spawn does not write to the outbox. Spawn's actions (assigning agents, +triggering merges) flow back through the gr2 CLI, which then emits its own +events. This prevents circular event chains. + +## 6. Hook Execution Contract + +This section formalizes the relationship between hook execution (hooks.py) and +event emission. + +### 6.1 Current State + +`hooks.py` currently: +- Parses `.gr2/hooks.toml` +- Resolves template variables +- Runs commands via `subprocess.run` +- Raises `SystemExit` on `on_failure: "block"` failures +- Prints JSON on `on_failure: "warn"` failures +- Does nothing on `on_failure: "skip"` failures + +It does **not** emit structured events. + +### 6.2 Target State + +Every hook execution produces events: + +``` +hook.started -> (command runs) -> hook.completed | hook.failed +``` + +If the hook's `when` condition is not met: + +``` +hook.skipped +``` + +The lifecycle stage runner (`run_lifecycle_stage`) becomes the event emitter. +After running all hooks for a stage, it emits the parent lifecycle event +(e.g., `lane.entered`) with a summary of hook results in the payload. + +### 6.3 Hook Output Capture + +Hook commands produce stdout and stderr. The event contract does not store full +output in events (it would bloat the outbox). Instead: + +- `hook.completed` includes `duration_ms` and `exit_code: 0`. +- `hook.failed` includes `duration_ms`, `exit_code`, `on_failure` policy, and + `stderr_tail` (last 500 bytes). 
+- Full stdout/stderr is written to: + ``` + .grip/events/hook_output/{event_id}.stdout + .grip/events/hook_output/{event_id}.stderr + ``` +- Hook output files follow the same retention policy as rotated outbox files. + +### 6.4 Hook Failure Propagation + +When a hook fails with `on_failure: "block"`: + +1. `hook.failed` event is emitted with `on_failure: "block"`. +2. The parent operation (e.g., `workspace.materialized`) is **not** emitted + because the operation did not complete. +3. Instead, the calling code should emit a domain-specific failure event + (e.g., `sync.conflict` or handle it in its own error path). + +When a hook fails with `on_failure: "warn"`: + +1. `hook.failed` event is emitted with `on_failure: "warn"`. +2. The parent operation continues and eventually emits its success event. +3. Consumers can correlate the `hook.failed` event with the parent by timestamp + and `owner_unit` context. + +When a hook fails with `on_failure: "skip"`: + +1. `hook.failed` event is emitted with `on_failure: "skip"`. +2. No consumer-visible notification. The event exists for audit trail only. + +## 7. 
Event Emission API + +### 7.1 Python Interface + +```python +from gr2.events import emit, EventType + +# Simple emission +emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace_root, + actor="agent:apollo", + owner_unit="apollo", + payload={ + "lane_name": "feat/hook-events", + "lane_type": "feature", + "repos": ["grip", "synapt"], + }, +) + +# With optional agent_id +emit( + event_type=EventType.HOOK_FAILED, + workspace_root=workspace_root, + actor="agent:apollo", + agent_id="agent_apollo_xyz789", + owner_unit="apollo", + payload={ + "stage": "on_materialize", + "hook_name": "editable-install", + "repo": "synapt", + "duration_ms": 3400, + "exit_code": 1, + "on_failure": "block", + "stderr_tail": "ERROR: pip install failed ...", + }, +) +``` + +### 7.2 EventType Enum + +```python +class EventType(str, Enum): + # Lane lifecycle + LANE_CREATED = "lane.created" + LANE_ENTERED = "lane.entered" + LANE_EXITED = "lane.exited" + LANE_SWITCHED = "lane.switched" + LANE_ARCHIVED = "lane.archived" + + # Lease lifecycle + LEASE_ACQUIRED = "lease.acquired" + LEASE_RELEASED = "lease.released" + LEASE_EXPIRED = "lease.expired" + LEASE_FORCE_BROKEN = "lease.force_broken" + + # Hook execution + HOOK_STARTED = "hook.started" + HOOK_COMPLETED = "hook.completed" + HOOK_FAILED = "hook.failed" + HOOK_SKIPPED = "hook.skipped" + + # PR lifecycle + PR_CREATED = "pr.created" + PR_STATUS_CHANGED = "pr.status_changed" + PR_CHECKS_PASSED = "pr.checks_passed" + PR_CHECKS_FAILED = "pr.checks_failed" + PR_REVIEW_SUBMITTED = "pr.review_submitted" + PR_MERGED = "pr.merged" + PR_MERGE_FAILED = "pr.merge_failed" + + # Sync operations + SYNC_STARTED = "sync.started" + SYNC_REPO_UPDATED = "sync.repo_updated" + SYNC_REPO_SKIPPED = "sync.repo_skipped" + SYNC_CONFLICT = "sync.conflict" + SYNC_COMPLETED = "sync.completed" + + # Recovery + FAILURE_RESOLVED = "failure.resolved" + LEASE_RECLAIMED = "lease.reclaimed" + + # Workspace operations + WORKSPACE_MATERIALIZED = "workspace.materialized" + 
WORKSPACE_FILE_PROJECTED = "workspace.file_projected" +``` + +### 7.3 Implementation Location + +The event emission module lives at: + +``` +gr2/python_cli/events.py +``` + +This module owns: +- `emit()` function +- `EventType` enum +- Outbox file management (append, rotation, seq tracking) +- Cursor read helpers for consumers + +It does **not** own consumer logic. Each consumer is a separate module. + +## 8. Channel Bridge Event Mapping + +The channel bridge translates gr2 events into channel messages. Not every event +produces a channel message. + +| Event | Channel Message | Channel | +|-------|----------------|---------| +| `lane.created` | `"{actor} created lane {lane_name} [{lane_type}] repos={repos}"` | #dev | +| `lane.entered` | `"{actor} entered {owner_unit}/{lane_name}"` | #dev | +| `lane.exited` | `"{actor} exited {owner_unit}/{lane_name}"` | #dev | +| `pr.created` | `"{actor} opened PR group {pr_group_id}: {repos}"` | #dev | +| `pr.merged` | `"{actor} merged PR group {pr_group_id}"` | #dev | +| `pr.checks_failed` | `"CI failed on {repo}#{pr_number}: {failed_checks}"` | #dev | +| `hook.failed` (block) | `"Hook {hook_name} failed in {repo} (blocking): {stderr_tail}"` | #dev | +| `sync.conflict` | `"Sync conflict in {repo}: {conflicting_files}"` | #dev | +| `lease.force_broken` | `"Lease on {lane_name} force-broken by {broken_by}: {reason}"` | #dev | +| `failure.resolved` | `"{resolved_by} resolved failure {operation_id} on {lane_name}"` | #dev | +| `lease.reclaimed` | `"Stale lease on {lane_name} reclaimed (was held by {previous_holder})"` | #dev | + +Events not listed (hook.started, hook.completed, hook.skipped, lease.acquired, +lease.released, sync.repo_updated, workspace.file_projected, etc.) are **not** +posted to channels by default. They exist in the outbox for recall indexing and +spawn, but would be noise in `#dev`. 
+ +The channel bridge can be configured to include or exclude specific event types +via a filter file at `.grip/events/channel_filter.toml`: + +```toml +[channel_bridge] +include = ["lane.*", "pr.*", "hook.failed", "sync.conflict", "lease.force_broken", "failure.resolved", "lease.reclaimed"] +exclude = ["hook.started", "hook.completed", "hook.skipped"] +``` + +Default: the mapping table above. Filter file is optional. + +**Filter vs. mapping**: The `include` globs may match event types that have no +entry in the mapping table (e.g., `lane.*` matches `lane.switched` and +`lane.archived`, which are not in the table above). Events that match the +include filter but have no mapping template are silently dropped by the bridge. +The filter controls which events the bridge *considers*; the mapping table +controls which events produce channel messages. To add a channel message for a +new event type, add both a mapping entry and ensure the filter covers it. + +## 9. Recall Indexing + +Recall indexes all events (not just the channel-visible subset) for searchable +history. The recall indexer is a cursor-based consumer that: + +1. Reads new events from the outbox. +2. Indexes each event by: lane, actor, repo, event type, and time range. +3. Stores indexed events in recall's existing storage layer. + +Query examples that this enables: + +- `recall_files(path="grip/src/main.rs")` can include "last sync updated this + file" if sync events include file-level detail. +- `recall_search("hook failure editable-install")` returns the hook.failed event + and its context. +- `recall_timeline(actor="agent:apollo", start="2026-04-15")` shows Apollo's + full activity timeline. + +The recall indexer does **not** need premium logic. It consumes the same neutral +event stream as the channel bridge. + +## 10. Failure Modes and Recovery + +### 10.1 Outbox Write Failure + +If `emit()` fails to append (disk full, permission error): + +- The event is lost. 
The operation that triggered it still completed. +- The outbox may be in an inconsistent state (partial line written). +- Recovery: consumers skip malformed lines. `gr2 gc` can truncate trailing + partial lines. + +Mitigation: `emit()` should catch write errors and log them to stderr +without crashing the parent operation. Events are important but not +operation-critical. + +### 10.2 Consumer Crash Mid-Processing + +If a consumer crashes after reading an event but before updating its cursor: + +- On restart, it re-reads from `last_seq + 1` and reprocesses events. +- This is safe because consumers must be idempotent (section 5.3). + +### 10.3 Outbox Rotation During Consumer Read + +If the outbox rotates while a consumer is reading: + +- The consumer's cursor points to a seq that no longer exists in the current + `outbox.jsonl`. +- Consumer must scan archived `outbox.{timestamp}.jsonl` files in order to find + the file containing its cursor position. +- Once caught up through archived files, it continues reading the current + `outbox.jsonl`. + +### 10.4 Concurrent Writers + +The current design assumes single-process writes (one gr2 CLI invocation at a +time per workspace). If concurrent writes become necessary: + +- Option A: File-level advisory lock during append. +- Option B: Separate outbox files per writer, with a merge step. +- Option C: Move to SQLite WAL-mode database. + +This is explicitly out of scope for the initial implementation. The single-writer +assumption is safe because gr2 operations are CLI-driven and workspace-local. + +## 11. Versioning and Evolution + +### 11.1 Schema Version + +The `version` field in the event envelope is `1` for this initial contract. + +Version bumps happen when: +- A required field is added to the common envelope. +- A payload field's type or meaning changes in a breaking way. + +Version bumps do **not** happen when: +- A new event type is added (consumers skip unknown types). +- An optional field is added to a payload. 
+- A new consumer is added. + +### 11.2 Backward Compatibility + +New event types are additive. Consumers that do not understand a new type skip +it. This means adding `pr.review_submitted` in a future release does not require +updating all consumers. + +Payload changes within an existing event type should be additive (new optional +fields). If a breaking change is needed, bump the version and document the +migration. + +## 12. Relation to Existing Documents + +This document supersedes the event-related sections of: + +- **SYNAPT-INTEGRATION.md** section 4 (Lane Event -> Recall Pipeline): This + contract formalizes and extends that design. The event format here is the + canonical schema; SYNAPT-INTEGRATION.md's examples are now illustrative only. +- **SYNAPT-INTEGRATION.md** section 5 (Channel Bridge): The channel bridge model + here is consistent but more precise about filtering and cursor management. + +This document builds on: + +- **HOOK-CONFIG-MODEL.md**: The hook execution contract (section 6) extends the + lifecycle model defined there. hooks.toml schema is unchanged; the new + contribution is event emission during hook execution. + +This document is a dependency for: + +- **PR-LIFECYCLE.md** (Sprint 20, Apollo): PR lifecycle design references the + pr.* event types defined here. +- **PLATFORM-ADAPTER-AND-SYNC.md** (Sprint 20, Atlas): Sync algorithm references + the sync.* event types defined here. +- **QA Arena** (Sprint 20, Sentinel): Adversarial test scenarios should exercise + event emission failure modes (section 10). + +## 13. Open Questions + +1. **Hook output retention**: Should hook output files (`.grip/events/hook_output/`) + follow the same 7-day retention as rotated outbox files, or longer? +2. **Event batching**: Should operations that touch multiple repos emit one event + per repo or one aggregate event? 
Current design uses both patterns depending + on the domain (sync uses per-repo events; PR uses aggregate events with + per-repo detail in payload arrays). +3. **Webhook bridge**: Should gr2 support an HTTP webhook consumer in addition to + file-based cursor consumers? This would be relevant for remote spawn + deployments. +4. **SQLite alternative**: For workspaces with heavy event traffic (many agents, + frequent operations), should the outbox be SQLite WAL instead of JSONL? + JSONL is simpler and auditable; SQLite handles concurrent writes better. +5. **Event signing**: Should events carry a signature or checksum for tamper + detection? Relevant if the outbox is consumed by premium policy enforcement. + +## 14. Failure Recovery Contract + +This section formalizes how gr2 handles operation failures at the state level. +Section 10 covers event infrastructure failures (outbox writes, consumer +crashes). This section covers operation-level failures: what happens to +workspace state when hooks fail, leases expire, or lane switches encounter +dirty repos. + +The core principle: **gr2 operations are forward-only. There is no rollback.** +Failures leave partial state with explicit markers that require resolution. + +### 14.1 Failure Markers + +When an operation fails mid-execution, gr2 writes a failure marker: + +``` +.grip/state/failures/{operation_id}.json +``` + +Marker format: + +```json +{ + "operation_id": "op_9f2a3b4c", + "operation": "sync", + "stage": "on_enter", + "hook_name": "editable-install", + "repo": "synapt", + "owner_unit": "apollo", + "lane_name": "feat/hook-events", + "failed_at": "2026-04-15T17:00:00+00:00", + "event_id": "9f3a7b2c1d4e8f06", + "partial_state": { + "repos_completed": ["grip"], + "repos_pending": ["synapt-private"], + "repo_failed": "synapt" + }, + "resolved": false +} +``` + +Marker behavior: + +- **Blocking**: The next operation on the same scope (lane, repos) checks for + unresolved failure markers. 
If one exists, the operation refuses to proceed
+  and reports the marker.
+- **Resolution**: `gr2 lane resolve <operation_id>` clears the marker. The
+  agent must decide whether to retry, skip, or escalate. Resolution is always
+  explicit.
+- **Event**: Resolving a marker emits a new event type:
+  `failure.resolved` with payload `{operation_id, resolved_by, resolution, lane_name}`.
+
+Why no automatic retry: retrying a failed hook might produce the same failure.
+The agent (or spawn) has context about whether retry is appropriate. gr2 does
+not guess.
+
+Why no rollback: reverting git operations (undo fetch+merge, undo checkout) is
+dangerous, sometimes impossible (remote state changed), and introduces a second
+failure mode (what if the revert fails?). Forward-only resolution is simpler and
+more honest about what happened.
+
+### 14.2 Lease Reclaim Lifecycle
+
+Leases use TTL-first expiry with optional heartbeat renewal.
+
+**TTL expiry** is the primary reclaim mechanism:
+
+- Every lease carries `ttl_seconds` (default 900s) and `expires_at`.
+- Expiry is checked lazily: the next `acquire`, `show`, or `status` call
+  evaluates `is_stale_lease()` (already in prototype at
+  `lane_workspace_prototype.py:592`).
+- No daemon or background process required.
+
+**Heartbeat renewal** is optional:
+
+- `gr2 lane lease renew <lane>` resets
+  `expires_at` to `now + ttl_seconds`.
+- Agents running long operations (multi-repo test suites, large builds) call
+  renew periodically to prevent premature expiry.
+- If the agent crashes, renewal stops, and TTL expiry reclaims the lease
+  naturally.
+
+**Reclaim flow**:
+
+1. Agent A holds lease with `expires_at = T`.
+2. Agent A crashes (no explicit release).
+3. Time passes beyond T.
+4. Agent B calls `gr2 lane lease acquire`.
+5. `acquire` finds A's lease, evaluates `is_stale_lease()` -> true.
+6. Emits `lease.expired` event (payload: `{lane_name, lease_id, expired_at}`).
+7. Garbage-collects A's stale lease from the lane doc.
+8.
Emits `lease.reclaimed` event (payload: + `{lane_name, lease_id, previous_holder, expired_at, reclaimed_by}`). +9. Grants B's new lease. Emits `lease.acquired` event. + +**Force break**: + +- `gr2 lane lease acquire --force` breaks a live (non-expired) lease. +- Emits `lease.force_broken` event with `{broken_by, reason}`. +- Notification routing to the original holder is a **channel_bridge consumer + responsibility**, not a core gr2 concern. The `lease.force_broken` event + carries `broken_by` and the original holder's identity in context fields. + The channel bridge (or spawn_watcher) decides how and where to deliver the + notification based on its own routing rules. + +### 14.3 Dirty State on Lane Switch + +Lane transitions handle uncommitted changes via an explicit `--dirty` mode. + +**Modes** (flag on `lane enter` and `lane exit`): + +| Mode | Behavior | Default? | +|------|----------|----------| +| `stash` | Auto-stash dirty repos. Stash message: `"gr2 auto-stash: exiting {unit}/{lane}"`. | Yes | +| `block` | Refuse to switch if any repo is dirty. List dirty repos in error. | No | +| `discard` | Discard uncommitted changes. Requires `--yes` flag. | No | + +**Event payloads for dirty state**: + +- `lane.exited` with `stashed_repos: ["synapt"]` when stash mode is used. +- `lane.exited` with `discarded_repos: ["synapt"]` when discard mode is used. +- No `lane.exited` event when block mode prevents the exit. + +**Re-entry with stashed state**: + +When `lane enter` is called and the lane has stashed state from a previous exit: + +- Default: warn that stashed state exists, do not auto-pop. The agent decides + whether to `git stash pop` manually. +- `--dirty=restore` on `lane enter`: auto-pop the stash. If the pop produces + a merge conflict, leave the conflict markers and emit a `hook.failed`-style + warning event. 
+ +**Consistency rule**: The `--dirty` flag and its values (`stash`, `block`, +`discard`, `restore`) must be consistent across `lane enter`, `lane exit`, and +`sync`. This is a shared contract with Atlas's sync algorithm design. diff --git a/gr2/docs/PR-LIFECYCLE.md b/gr2/docs/PR-LIFECYCLE.md new file mode 100644 index 0000000..0166585 --- /dev/null +++ b/gr2/docs/PR-LIFECYCLE.md @@ -0,0 +1,582 @@ +# gr2 PR Lifecycle Management + +This document defines how gr2 manages pull requests across multiple repos. It +builds on Atlas's PlatformAdapter protocol and references the event contract in +HOOK-EVENT-CONTRACT.md. + +This is a **design document** for Sprint 20. It does not describe current +behavior; it defines the target design for `gr2 pr` commands. + +## 1. Design Goals + +- PR operations are cross-repo by default. `gr2 pr create` creates linked PRs + across all repos with changes on the current lane's branch. +- A PR group is the first-class unit. Individual repo PRs are children of the + group. +- PR state transitions emit events from the hook/event contract. +- PlatformAdapter is the only interface to the hosting platform. gr2 does not + shell out to `gh`, `glab`, or platform-specific CLIs. +- Merge ordering is explicit and configurable, not implicit. + +## 2. Concepts + +### 2.1 PR Group + +A **PR group** is a set of related PRs across repos that belong to the same +logical change. When an agent works on a lane that touches `grip`, `synapt`, and +`synapt-private`, `gr2 pr create` produces one PR group with three child PRs. 
+ +```json +{ + "pr_group_id": "pg_8a3f1b2c", + "lane_name": "feat/hook-events", + "owner_unit": "apollo", + "created_by": "agent:apollo", + "created_at": "2026-04-15T17:00:00+00:00", + "title": "feat: hook/event contract design", + "base_branch": "sprint-20", + "head_branch": "design/hook-event-contract", + "prs": [ + { + "repo": "grip", + "pr_number": 570, + "url": "https://github.com/synapt-dev/grip/pull/570", + "status": "open", + "checks_status": "pending", + "reviews": [] + }, + { + "repo": "synapt", + "pr_number": 583, + "url": "https://github.com/synapt-dev/synapt/pull/583", + "status": "open", + "checks_status": "passing", + "reviews": [{"reviewer": "sentinel", "verdict": "approved"}] + } + ] +} +``` + +The `pr_group_id` is the cross-repo correlation key from the event contract. +Format: `pg_` prefix + 8-char hex. + +### 2.2 PR Group State + +A PR group has an aggregate state derived from its children: + +| Group State | Condition | +|-------------|-----------| +| `draft` | All child PRs are draft. | +| `open` | At least one child PR is open (non-draft). | +| `checks_pending` | At least one child PR has pending checks. | +| `checks_passing` | All child PRs have passing checks. | +| `checks_failing` | At least one child PR has failing checks. | +| `review_required` | At least one child PR needs more reviews to meet compiled review requirements. | +| `approved` | All child PRs meet their review requirements. | +| `mergeable` | All children are `checks_passing` + `approved` + no merge conflicts. | +| `merged` | All child PRs have been merged. | +| `partially_merged` | Some (but not all) child PRs have been merged. This is an error state. | + +Group state is computed, not stored. `gr2 pr status` queries each child PR via +PlatformAdapter and aggregates. 
+ +### 2.3 PR Group Storage + +PR group metadata is stored at: + +``` +.grip/pr_groups/{pr_group_id}.json +``` + +This file is created by `gr2 pr create` and updated by `gr2 pr status` (to +cache last-known state) and `gr2 pr merge` (to record merge SHAs). + +The `.grip/pr_groups/` directory is workspace-local state, not committed to any +repo. + +## 3. Commands + +### 3.1 `gr2 pr create` + +Creates linked PRs across repos. + +``` +gr2 pr create + --title "feat: hook/event contract" + [--body "description"] + [--base sprint-20] + [--draft] + [--push] + [--json] +``` + +Flow: + +1. Load lane doc for `owner_unit/lane_name`. +2. For each repo in the lane's `repos` list: + a. Check if the repo has commits on `head_branch` not in `base_branch`. + b. Skip repos with no new commits (no empty PRs). + c. Push the branch if `--push` is set. + d. Call `PlatformAdapter.create_pr(repo, head, base, title, body, draft)`. + e. Record the returned PR number and URL. +3. Generate `pr_group_id`. +4. Write PR group metadata to `.grip/pr_groups/{pr_group_id}.json`. +5. Update each child PR's body to include cross-links: + ``` + ## Linked PRs (gr2 group: pg_8a3f1b2c) + - synapt-dev/grip#570 + - synapt-dev/synapt#583 + ``` +6. Emit `pr.created` event. +7. Print summary. + +**Cross-linking** is important: each child PR's body includes references to all +sibling PRs. This makes the relationship visible on the platform even if gr2 is +not available. + +**Base branch resolution**: If `--base` is not specified, use the lane's +`base_branch` (from lane doc) or fall back to the repo's default branch. + +### 3.2 `gr2 pr status` + +Shows aggregated status of the PR group for the current lane. + +``` +gr2 pr status [] + [--json] +``` + +Flow: + +1. Find the PR group for the lane (scan `.grip/pr_groups/` for matching + `lane_name` and `owner_unit`). +2. For each child PR, call `PlatformAdapter.get_pr(repo, pr_number)`. +3. For each child PR, call `PlatformAdapter.get_checks(repo, pr_number)`. +4. 
For each child PR, call `PlatformAdapter.get_reviews(repo, pr_number)`.
+5. Aggregate into group state.
+6. Evaluate review requirements from compiled workspace constraints.
+7. Print summary table:
+
+```
+PR Group pg_8a3f1b2c: feat: hook/event contract
+Lane: apollo/design/hook-event-contract -> sprint-20
+
+  Repo     PR      Checks    Reviews         Mergeable
+  grip     #570    passing   1/1 required    yes
+  synapt   #583    pending   0/1 required    no (checks pending)
+
+  Group state: checks_pending
+  Blocking: synapt checks pending
+```
+
+If any child PR has status changes since the last cached state, emit
+`pr.status_changed` events.
+
+### 3.3 `gr2 pr merge`
+
+Merges the PR group.
+
+```
+gr2 pr merge [<lane-name>]
+  [--strategy squash|merge|rebase]
+  [--force]
+  [--auto]
+  [--json]
+```
+
+Flow:
+
+1. Find the PR group for the lane.
+2. Compute group state. If not `mergeable` and `--force` is not set, abort with
+   an error explaining what is blocking.
+3. Determine merge order (section 4).
+4. For each child PR in order:
+   a. Call `PlatformAdapter.merge_pr(repo, pr_number, strategy)`.
+   b. Record the merge SHA.
+   c. If merge fails, stop. Do not merge remaining repos. Emit
+      `pr.merge_failed` event.
+5. If all merges succeed, emit `pr.merged` event.
+6. Update PR group metadata with merge SHAs and final state.
+7. Print summary.
+
+**`--auto` mode**: Instead of merging immediately, enable auto-merge on each
+child PR. The platform merges each PR when its checks pass. This is useful for
+CI-heavy repos where checks take time. Note: auto-merge relies on platform
+support (GitHub has this; others may not).
+
+**`--force` mode**: Skip the `mergeable` gate. Useful when a reviewer override
+is needed. Still respects platform-level branch protection.
+
+### 3.4 `gr2 pr checks`
+
+Shows CI/check status for the PR group.
+
+```
+gr2 pr checks [<lane-name>]
+  [--watch]
+  [--json]
+```
+
+Flow:
+
+1. Find the PR group.
+2. For each child PR, call `PlatformAdapter.get_checks(repo, pr_number)`.
+3. Print status per repo. 
+ +`--watch` mode: Poll every 30 seconds and update the display. Emit +`pr.checks_passed` or `pr.checks_failed` events when the aggregate state +changes. + +### 3.5 `gr2 pr list` + +Lists PR groups in the workspace. + +``` +gr2 pr list + [--owner-unit ] + [--state open|merged|all] + [--json] +``` + +Flow: + +1. Scan `.grip/pr_groups/` for group metadata files. +2. Filter by owner_unit and state. +3. Print summary table. + +## 4. Merge Ordering + +When merging a PR group, the order matters. If `synapt-private` depends on +`synapt`, merging `synapt-private` first could break CI on the base branch. + +### 4.1 Default Order + +Merge in `[[repos]]` declaration order from WorkspaceSpec. This is the simplest +model and works when the workspace author has already ordered repos by +dependency. + +### 4.2 Explicit Order + +The workspace spec can declare merge ordering: + +```toml +[workspace_constraints.merge_order] +strategy = "explicit" +order = ["grip", "synapt", "synapt-private"] +``` + +### 4.3 Dependency-Aware Order (Future) + +A future extension could parse repo dependency graphs (e.g., pip dependencies, +Cargo workspace members) to derive merge order automatically. This is out of +scope for Sprint 20. + +### 4.4 Partial Merge Recovery + +If merge fails partway through (repo A merged, repo B failed): + +1. The PR group enters `partially_merged` state. +2. `pr.merge_failed` event is emitted for repo B. +3. The already-merged repo A cannot be un-merged. +4. Options: + a. Fix the issue in repo B and retry `gr2 pr merge`. + b. Revert repo A's merge manually and start over. + +This is the most dangerous failure mode in cross-repo PR management. The design +doc acknowledges it but does not try to solve it automatically. The right +mitigation is: + +- Run `gr2 pr checks` and confirm all checks pass before merging. +- Use `--auto` mode to let the platform gate each merge on checks. 
+- Keep the merge order aligned with dependency order so downstream repos + merge after their dependencies. + +## 5. PlatformAdapter Integration + +### 5.1 Adapter Protocol (Atlas's Design) + +gr2's PR lifecycle consumes Atlas's PlatformAdapter protocol. The expected +interface (from Atlas's `platform.py`): + +```python +class PlatformAdapter(Protocol): + def create_pr(self, repo: str, head: str, base: str, + title: str, body: str, draft: bool) -> PRRef: ... + def get_pr(self, repo: str, pr_number: int) -> PRStatus: ... + def merge_pr(self, repo: str, pr_number: int, + strategy: str) -> MergeResult: ... + def get_checks(self, repo: str, pr_number: int) -> list[PRCheck]: ... + def get_reviews(self, repo: str, pr_number: int) -> list[PRReview]: ... + def update_pr_body(self, repo: str, pr_number: int, body: str) -> None: ... +``` + +**PlatformAdapter is group-unaware.** It operates on individual per-repo PRs and +has no concept of `pr_group_id` or cross-repo correlation. The grouping logic +lives in gr2's `pr.py` orchestration module, which: + +1. Calls PlatformAdapter per-repo to create/query/merge individual PRs. +2. Assigns the `pr_group_id` (format: `pg_` + 8-char hex). +3. Correlates per-repo `PRRef` objects into a PR group. +4. Manages cross-link injection into PR bodies. +5. Emits `pr.*` events with the group ID. + +This separation keeps platform adapters simple and reusable. A platform adapter +can be used by other tools that don't need grouping semantics. + +### 5.2 Adapter Resolution + +`get_platform_adapter(repo_spec)` resolves the correct adapter based on the +repo's remote URL. For Sprint 20, only `GitHubAdapter` is implemented. + +### 5.3 Rate Limiting + +The adapter handles rate limiting internally. If the platform returns a rate +limit response, the adapter retries with backoff. gr2 does not manage rate +limits at the PR lifecycle level. 
+ +### 5.4 Relation to gr1 HostingPlatform + +gr1's Rust `HostingPlatform` trait (in `src/platform/traits.rs`) covers the same +operations. The Python PlatformAdapter is the gr2 equivalent, designed for +Python-first UX validation. When Rust gr2 absorbs PR lifecycle, it should +reuse the existing `HostingPlatform` trait, not create a third adapter surface. + +The mapping: + +| gr1 Rust trait | gr2 Python adapter | +|----------------|--------------------| +| `create_pull_request` | `create_pr` | +| `get_pull_request` | `get_pr` | +| `merge_pull_request` | `merge_pr` | +| `get_status_checks` | `get_checks` | +| `get_pull_request_reviews` | `get_reviews` | +| `update_pull_request_body` | `update_pr_body` | +| `find_pr_by_branch` | Not yet in adapter (needed for `gr2 pr status` without group ID) | +| `is_pull_request_approved` | Derived from `get_reviews` | + +## 6. Event Emission + +PR lifecycle emits events defined in HOOK-EVENT-CONTRACT.md section 3.2. + +### 6.1 Create Flow Events + +``` +gr2 pr create + -> pr.created (payload: pr_group_id, repos with pr_numbers) +``` + +### 6.2 Status Check Events + +``` +gr2 pr status (or --watch poll) + -> pr.status_changed (per repo, when status differs from cached) + -> pr.checks_passed (per repo, when all checks go green) + -> pr.checks_failed (per repo, when a check fails) + -> pr.review_submitted (per repo, when new review detected) +``` + +### 6.3 Merge Flow Events + +``` +gr2 pr merge + -> pr.merged (if all repos merge successfully) + or + -> pr.merge_failed (for the first repo that fails) +``` + +### 6.4 Event Ordering + +Events are emitted in operation order. For `gr2 pr merge` with repos A, B, C: + +1. Merge A succeeds (no event yet; waiting for group completion). +2. Merge B succeeds (no event yet). +3. Merge C succeeds. +4. Emit `pr.merged` with all three repos' merge SHAs. + +If merge B fails: + +1. Merge A succeeds (no event for A alone). +2. Merge B fails. +3. Emit `pr.merge_failed` for B. +4. Do not attempt C. 
+ +The design emits one event at the end, not per-repo events during merge. This +keeps the event stream clean: consumers see either one `pr.merged` or one +`pr.merge_failed`, not a mix. + +## 7. Review Requirements + +### 7.1 Compiled Requirements + +Review requirements come from the compiled WorkspaceSpec (originally from +premium's org policy): + +```toml +[workspace_constraints.required_reviewers] +grip = 1 +synapt = 1 +synapt-private = 2 +``` + +### 7.2 Evaluation + +`gr2 pr status` evaluates review requirements per repo: + +1. Get reviews from PlatformAdapter. +2. Count approvals (excluding stale reviews on outdated commits). +3. Compare against compiled requirement. +4. Report satisfied/unsatisfied per repo. + +This already exists in the Python CLI as `gr2 review requirements`. The PR +lifecycle integrates it into the `mergeable` gate. + +### 7.3 Boundary + +Review requirement **evaluation** (counting approvals against a threshold) is +OSS. Review requirement **definition** (who can review, role-based overrides, +org-level policies) is premium. gr2 only consumes the compiled numeric +threshold. + +## 8. Cross-Link Format + +When `gr2 pr create` creates linked PRs, it appends a standard section to each +PR body: + +```markdown +--- + +## gr2 PR Group: pg_8a3f1b2c + +| Repo | PR | +|------|----| +| grip | synapt-dev/grip#570 | +| synapt | synapt-dev/synapt#583 | +| synapt-private | synapt-dev/synapt-private#291 | + +Lane: `apollo/design/hook-event-contract` +Base: `sprint-20` + +*Managed by [gr2](https://github.com/synapt-dev/grip)* +``` + +This section is: +- Machine-parseable (table format with consistent columns). +- Human-readable on GitHub/GitLab. +- Identifiable by the `gr2 PR Group:` header for updates. + +When `gr2 pr status` detects a new child PR was added (e.g., a new repo was +added to the lane), it updates all sibling PR bodies to include the new link. + +## 9. 
Lane Integration + +### 9.1 Lane -> PR Group Mapping + +A lane can have at most one active PR group. Creating a second PR group for the +same lane replaces the first (the old group is archived). + +The mapping is: +- Forward: lane doc stores `pr_group_id` when a PR group is created. +- Reverse: PR group metadata stores `lane_name` and `owner_unit`. + +### 9.2 Lane Exit with Open PRs + +When `gr2 lane exit` is called while the lane has an open PR group: + +- The lane exit proceeds normally (stash dirty state, run on_exit hooks). +- The PR group remains open. PRs are on the platform; they do not depend on + the local lane state. +- `gr2 pr status` can still query the group even after the lane is exited. + +### 9.3 Lane Archive after Merge + +When `gr2 pr merge` completes successfully: + +- The PR group is marked as `merged`. +- The lane is eligible for archival (`lane.archived` event). +- Actual archival (deleting the lane root, cleaning up branches) is a separate + command or automated by spawn. + +## 10. Relation to gr1 + +gr1's `gr pr create/status/merge/checks` commands are the production surface +today. They work but have implicit cross-repo linking (via branch name +convention, not explicit group IDs). + +gr2's PR lifecycle improves on gr1 in three ways: + +1. **Explicit grouping**: PR groups with stable IDs replace implicit branch-name + matching. +2. **Event emission**: Every PR state change produces a durable event. +3. **Platform abstraction**: PlatformAdapter replaces direct `gh` CLI calls. + +The migration path: gr1 continues to handle daily PR workflow until gr2's PR +commands are proven. gr2 PR commands are validated in the playground first +(Sentinel's QA arena), then adopted for real workflow. + +## 11. Implementation Plan + +### Sprint 20 (Design) + +- This document. +- Event schema for pr.* types (done, in HOOK-EVENT-CONTRACT.md). +- Coordinate with Atlas on PlatformAdapter method signatures. 
+- Coordinate with Sentinel on QA scenarios for PR lifecycle. + +### Sprint 21 (Implementation Target) + +1. `gr2/python_cli/pr.py` module with PR group CRUD. +2. `gr2 pr create` command consuming PlatformAdapter. +3. `gr2 pr status` command with aggregated state. +4. `gr2 pr merge` command with ordering. +5. Event emission at each step. +6. Integration tests in QA arena. + +### Sprint 22 (Polish) + +- `gr2 pr checks --watch` with polling. +- `gr2 pr list` for workspace-wide PR overview. +- Auto-merge mode. +- Edge cases from QA arena feedback. + +## 12. QA Arena Scenarios + +These scenarios should be covered by Sentinel's adversarial test suite: + +1. **Happy path**: Create PR group with 3 repos, all checks pass, all reviews + met, merge succeeds. +2. **Partial merge failure**: Repo A merges, repo B has a conflict. Verify + `partially_merged` state and `pr.merge_failed` event. +3. **Review requirement not met**: One repo needs 2 reviews, only has 1. Verify + `gr2 pr merge` blocks (without `--force`). +4. **Stale review**: Review was approved, then new commits pushed. Verify the + stale review is not counted. +5. **PR created with no changes in some repos**: Verify repos with no new + commits are skipped, not given empty PRs. +6. **Rate limiting**: PlatformAdapter returns rate limit during `gr2 pr merge`. + Verify retry behavior. +7. **Platform timeout**: PlatformAdapter times out during `gr2 pr status`. + Verify graceful degradation (show cached state with warning). +8. **Concurrent merge**: Two agents try to merge the same PR group. Verify only + one succeeds (platform-level atomicity). +9. **Cross-link update**: New repo added to lane after initial PR creation. + Verify cross-links are updated in all sibling PRs. +10. **Auto-merge mode**: Enable auto-merge on all child PRs. Verify events are + emitted when platform auto-merges each PR. + +## 13. Open Questions + +1. 
**PR group ID persistence**: Should `pr_group_id` be stored in the lane doc + (tying it to local state) or only in `.grip/pr_groups/` (making it + workspace-level state)? Current design uses both for forward/reverse lookup. +2. **Multi-platform groups**: Can a PR group span repos on different platforms + (e.g., grip on GitHub, infra on GitLab)? The adapter-per-repo model supports + this, but merge ordering and cross-linking become more complex. +3. **PR updates after creation**: Should `gr2 pr update` exist to change title, + body, or base branch of an existing group? Or is that always done directly + on the platform? +4. **Branch cleanup**: Should `gr2 pr merge` automatically delete remote + branches after merge? gr1 does this. gr2 should probably follow suit but + it is a destructive operation. +5. **Manifest repo PRs**: Should the manifest repo (if tracked) get its own + child PR in the group? gr1 includes the manifest in PR operations. gr2's + lane model may not always include the manifest. 
From 0725867e74b79d54f6f353f03f822002764e3dd1 Mon Sep 17 00:00:00 2001 From: Layne Penney Date: Wed, 15 Apr 2026 13:07:35 -0500 Subject: [PATCH 07/18] feat: gr2 event system runtime (emit, outbox, cursors, bridge, hooks, PR lifecycle) 98 tests across 6 domains: - EventType enum (31 types: lane, lease, hook, PR, sync, recovery, workspace) - emit() with flat JSONL outbox, fcntl locking, 10MB rotation, cursor consumers - Lane/lease event wiring in app.py - Channel bridge consumer (11 mapped event types) - Hook execution events (started/completed/failed/skipped) - PR lifecycle events via pr.py orchestration layer Co-Authored-By: Claude Opus 4.6 --- gr2/python_cli/app.py | 71 ++++- gr2/python_cli/channel_bridge.py | 113 +++++++ gr2/python_cli/events.py | 240 +++++++++++++++ gr2/python_cli/hooks.py | 59 ++++ gr2/python_cli/pr.py | 254 +++++++++++++++ gr2/tests/__init__.py | 0 gr2/tests/conftest.py | 15 + gr2/tests/test_channel_bridge.py | 333 ++++++++++++++++++++ gr2/tests/test_events.py | 493 ++++++++++++++++++++++++++++++ gr2/tests/test_hook_events.py | 327 ++++++++++++++++++++ gr2/tests/test_pr_events.py | 510 +++++++++++++++++++++++++++++++ 11 files changed, 2414 insertions(+), 1 deletion(-) create mode 100644 gr2/python_cli/channel_bridge.py create mode 100644 gr2/python_cli/events.py create mode 100644 gr2/python_cli/pr.py create mode 100644 gr2/tests/__init__.py create mode 100644 gr2/tests/conftest.py create mode 100644 gr2/tests/test_channel_bridge.py create mode 100644 gr2/tests/test_events.py create mode 100644 gr2/tests/test_hook_events.py create mode 100644 gr2/tests/test_pr_events.py diff --git a/gr2/python_cli/app.py b/gr2/python_cli/app.py index be29f21..0fa3843 100644 --- a/gr2/python_cli/app.py +++ b/gr2/python_cli/app.py @@ -23,6 +23,7 @@ repo_dirty, stash_if_dirty, ) +from .events import emit, EventType from .hooks import HookContext, apply_file_projections, load_repo_hooks, run_lifecycle_stage from . 
import spec_apply from gr2.prototypes import lane_workspace_prototype as lane_proto @@ -607,6 +608,28 @@ def lane_create( ) _exit(lane_proto.create_lane(ns)) _materialize_lane_repos(workspace_root, owner_unit, lane_name, manual_hooks=manual_hooks) + repo_list = [r.strip() for r in repos.split(",")] + branch_parts = branch.split(",") + branch_map = {} + for part in branch_parts: + if "=" in part: + k, v = part.split("=", 1) + branch_map[k.strip()] = v.strip() + else: + for r in repo_list: + branch_map[r] = part.strip() + emit( + event_type=EventType.LANE_CREATED, + workspace_root=workspace_root, + actor=source, + owner_unit=owner_unit, + payload={ + "lane_name": lane_name, + "lane_type": lane_type, + "repos": repo_list, + "branch_map": branch_map, + }, + ) @lane_app.command("enter") @@ -631,6 +654,18 @@ def lane_enter( recall=recall, ) _exit(lane_proto.enter_lane(ns)) + lane_doc = lane_proto.load_lane_doc(workspace_root, owner_unit, lane_name) + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace_root, + actor=actor, + owner_unit=owner_unit, + payload={ + "lane_name": lane_name, + "lane_type": lane_doc.get("type", "feature"), + "repos": lane_doc.get("repos", []), + }, + ) @lane_app.command("exit") @@ -647,10 +682,12 @@ def lane_exit( current_doc = lane_proto.load_current_lane_doc(workspace_root, owner_unit) lane_name = current_doc["current"]["lane_name"] lane_doc = lane_proto.load_lane_doc(workspace_root, owner_unit, lane_name) + stashed_repos: list[str] = [] for repo_name in lane_doc.get("repos", []): repo_root = _lane_repo_root(workspace_root, owner_unit, lane_name, repo_name) if repo_root.exists(): - stash_if_dirty(repo_root, f"gr2 exit {owner_unit}/{lane_name}") + if stash_if_dirty(repo_root, f"gr2 exit {owner_unit}/{lane_name}"): + stashed_repos.append(repo_name) _run_lane_stage(workspace_root, owner_unit, lane_name, "on_exit", manual_hooks=manual_hooks) ns = SimpleNamespace( workspace_root=workspace_root, @@ -660,6 +697,16 @@ def lane_exit( 
recall=recall, ) _exit(lane_proto.exit_lane(ns)) + emit( + event_type=EventType.LANE_EXITED, + workspace_root=workspace_root, + actor=actor, + owner_unit=owner_unit, + payload={ + "lane_name": lane_name, + "stashed_repos": stashed_repos, + }, + ) @lane_app.command("current") @@ -698,6 +745,18 @@ def lane_lease_acquire( force=force, ) _exit(lane_proto.acquire_lane_lease(ns)) + emit( + event_type=EventType.LEASE_ACQUIRED, + workspace_root=workspace_root, + actor=actor, + owner_unit=owner_unit, + payload={ + "lane_name": lane_name, + "mode": mode, + "ttl_seconds": ttl_seconds, + "lease_id": f"{owner_unit}:{lane_name}", + }, + ) @lease_app.command("release") @@ -715,6 +774,16 @@ def lane_lease_release( actor=actor, ) _exit(lane_proto.release_lane_lease(ns)) + emit( + event_type=EventType.LEASE_RELEASED, + workspace_root=workspace_root, + actor=actor, + owner_unit=owner_unit, + payload={ + "lane_name": lane_name, + "lease_id": f"{owner_unit}:{lane_name}", + }, + ) @lease_app.command("show") diff --git a/gr2/python_cli/channel_bridge.py b/gr2/python_cli/channel_bridge.py new file mode 100644 index 0000000..a4c08e0 --- /dev/null +++ b/gr2/python_cli/channel_bridge.py @@ -0,0 +1,113 @@ +"""gr2 channel bridge consumer. + +Translates outbox events into channel messages per the mapping table in +HOOK-EVENT-CONTRACT.md section 8. Uses cursor-based consumption from +events.read_events(). + +The bridge is a pure function layer: format_event() maps an event dict to +a message string (or None), and run_bridge() orchestrates cursor reads and +posts via a caller-provided post_fn. This keeps the MCP/recall_channel +dependency out of the module and makes it fully testable. +""" +from __future__ import annotations + +from pathlib import Path +from typing import Callable + +from .events import read_events + + +_CONSUMER_NAME = "channel_bridge" + + +def format_event(event: dict[str, object]) -> str | None: + """Apply the section 8 mapping table to produce a channel message. 
+ + Returns None if the event type is not mapped (silently dropped). + """ + etype = event.get("type", "") + + if etype == "lane.created": + return ( + f"{event['actor']} created lane {event['lane_name']}" + f" [{event.get('lane_type', 'unknown')}]" + f" repos={event.get('repos', [])}" + ) + + if etype == "lane.entered": + return f"{event['actor']} entered {event['owner_unit']}/{event['lane_name']}" + + if etype == "lane.exited": + return f"{event['actor']} exited {event['owner_unit']}/{event['lane_name']}" + + if etype == "pr.created": + repos = event.get("repos", []) + if isinstance(repos, list) and repos and isinstance(repos[0], dict): + repo_names = [r.get("repo", "") for r in repos] + else: + repo_names = repos + return ( + f"{event['actor']} opened PR group {event['pr_group_id']}: {repo_names}" + ) + + if etype == "pr.merged": + return f"{event['actor']} merged PR group {event['pr_group_id']}" + + if etype == "pr.checks_failed": + failed = event.get("failed_checks", []) + return f"CI failed on {event['repo']}#{event['pr_number']}: {failed}" + + if etype == "hook.failed": + # Only blocking hook failures produce channel messages. + if event.get("on_failure") != "block": + return None + return ( + f"Hook {event['hook_name']} failed in {event['repo']}" + f" (blocking): {event.get('stderr_tail', '')}" + ) + + if etype == "sync.conflict": + files = event.get("conflicting_files", []) + return f"Sync conflict in {event['repo']}: {files}" + + if etype == "lease.force_broken": + return ( + f"Lease on {event['lane_name']} force-broken" + f" by {event['broken_by']}: {event.get('reason', '')}" + ) + + if etype == "failure.resolved": + return ( + f"{event['resolved_by']} resolved failure" + f" {event['operation_id']} on {event['lane_name']}" + ) + + if etype == "lease.reclaimed": + return ( + f"Stale lease on {event['lane_name']} reclaimed" + f" (was held by {event['previous_holder']})" + ) + + # Unmapped event type: silently dropped. 
+ return None + + +def run_bridge( + workspace_root: Path, + *, + post_fn: Callable[[str], object], +) -> int: + """Read new events from the outbox and post mapped messages. + + Uses the 'channel_bridge' cursor. Returns the number of messages posted. + The post_fn receives formatted message strings; the caller decides how to + deliver them (recall_channel, print, log, etc.). + """ + events = read_events(workspace_root, _CONSUMER_NAME) + posted = 0 + for event in events: + msg = format_event(event) + if msg is not None: + post_fn(msg) + posted += 1 + return posted diff --git a/gr2/python_cli/events.py b/gr2/python_cli/events.py new file mode 100644 index 0000000..2758e0d --- /dev/null +++ b/gr2/python_cli/events.py @@ -0,0 +1,240 @@ +"""gr2 event system runtime. + +Implements the event contract from HOOK-EVENT-CONTRACT.md sections 3-8: +- EventType enum (section 7.2) +- emit() function (sections 4.2, 7.1) +- Outbox management with rotation (sections 4.1-4.4) +- Cursor-based consumer model (section 5.1) +""" +from __future__ import annotations + +import fcntl +import json +import os +import sys +from datetime import datetime, timezone +from enum import Enum +from pathlib import Path + + +# Reserved field names that payload keys must not collide with (section 3.1). 
+_RESERVED_NAMES = frozenset({ + "version", "event_id", "seq", "timestamp", "type", + "workspace", "actor", "agent_id", "owner_unit", +}) + +_ROTATION_THRESHOLD = 10 * 1024 * 1024 # 10 MB + + +class EventType(str, Enum): + # Lane lifecycle + LANE_CREATED = "lane.created" + LANE_ENTERED = "lane.entered" + LANE_EXITED = "lane.exited" + LANE_SWITCHED = "lane.switched" + LANE_ARCHIVED = "lane.archived" + + # Lease lifecycle + LEASE_ACQUIRED = "lease.acquired" + LEASE_RELEASED = "lease.released" + LEASE_EXPIRED = "lease.expired" + LEASE_FORCE_BROKEN = "lease.force_broken" + + # Hook execution + HOOK_STARTED = "hook.started" + HOOK_COMPLETED = "hook.completed" + HOOK_FAILED = "hook.failed" + HOOK_SKIPPED = "hook.skipped" + + # PR lifecycle + PR_CREATED = "pr.created" + PR_STATUS_CHANGED = "pr.status_changed" + PR_CHECKS_PASSED = "pr.checks_passed" + PR_CHECKS_FAILED = "pr.checks_failed" + PR_REVIEW_SUBMITTED = "pr.review_submitted" + PR_MERGED = "pr.merged" + PR_MERGE_FAILED = "pr.merge_failed" + + # Sync operations + SYNC_STARTED = "sync.started" + SYNC_REPO_UPDATED = "sync.repo_updated" + SYNC_REPO_SKIPPED = "sync.repo_skipped" + SYNC_CONFLICT = "sync.conflict" + SYNC_COMPLETED = "sync.completed" + SYNC_CACHE_SEEDED = "sync.cache_seeded" + SYNC_CACHE_REFRESHED = "sync.cache_refreshed" + + # Recovery + FAILURE_RESOLVED = "failure.resolved" + LEASE_RECLAIMED = "lease.reclaimed" + + # Workspace operations + WORKSPACE_MATERIALIZED = "workspace.materialized" + WORKSPACE_FILE_PROJECTED = "workspace.file_projected" + + +def _outbox_path(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "events" / "outbox.jsonl" + + +def _cursors_dir(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "events" / "cursors" + + +def _current_seq(outbox: Path) -> int: + """Return the highest seq in the outbox, or 0 if empty/missing.""" + if not outbox.exists(): + return 0 + try: + text = outbox.read_text() + except OSError: + return 0 + last_seq = 0 + for line in 
text.strip().split("\n"): + line = line.strip() + if not line: + continue + try: + obj = json.loads(line) + if isinstance(obj, dict) and "seq" in obj: + last_seq = max(last_seq, obj["seq"]) + except (json.JSONDecodeError, TypeError): + continue + return last_seq + + +def _maybe_rotate(outbox: Path) -> None: + """Rotate the outbox file if it exceeds the size threshold.""" + if not outbox.exists(): + return + try: + size = outbox.stat().st_size + except OSError: + return + if size <= _ROTATION_THRESHOLD: + return + ts = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%S") + archive = outbox.parent / f"outbox.{ts}.jsonl" + outbox.rename(archive) + + +def emit( + event_type: EventType, + workspace_root: Path, + actor: str, + owner_unit: str, + payload: dict[str, object], + *, + agent_id: str | None = None, +) -> None: + """Emit a single event to the workspace outbox. + + Builds a flat JSON object from envelope + context + payload fields and + appends it as one line to .grip/events/outbox.jsonl. + + Does not raise on write failure (section 10.1). Errors are logged to + stderr so the parent operation can continue. + """ + # Validate payload keys against reserved names. + collisions = _RESERVED_NAMES & payload.keys() + if collisions: + raise ValueError( + f"payload keys collide with reserved envelope/context names: {collisions}" + ) + + try: + outbox = _outbox_path(workspace_root) + outbox.parent.mkdir(parents=True, exist_ok=True) + lock_path = outbox.with_suffix(".lock") + + with lock_path.open("a+") as lock_fh: + fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX) + try: + # Capture seq before rotation (rotation empties the current file). + seq = _current_seq(outbox) + 1 + _maybe_rotate(outbox) + + # Build flat event object. 
+ event: dict[str, object] = { + "version": 1, + "event_id": os.urandom(8).hex(), + "seq": seq, + "timestamp": datetime.now(timezone.utc).isoformat(), + "type": str(event_type.value), + "workspace": workspace_root.name, + "actor": actor, + "owner_unit": owner_unit, + } + if agent_id is not None: + event["agent_id"] = agent_id + event.update(payload) + + # Append as single JSONL line. + with outbox.open("a") as f: + f.write(json.dumps(event, separators=(",", ":")) + "\n") + f.flush() + finally: + fcntl.flock(lock_fh.fileno(), fcntl.LOCK_UN) + + except Exception as exc: + print(f"gr2: event emit failed: {exc}", file=sys.stderr) + + +def read_events(workspace_root: Path, consumer: str) -> list[dict[str, object]]: + """Read new events from the outbox for the named consumer. + + Returns events with seq > cursor's last_seq. Updates the cursor file + atomically after reading. + """ + outbox = _outbox_path(workspace_root) + if not outbox.exists(): + return [] + + cursor = _load_cursor(workspace_root, consumer) + last_seq = cursor.get("last_seq", 0) + + events: list[dict[str, object]] = [] + text = outbox.read_text() + for line in text.strip().split("\n"): + line = line.strip() + if not line: + continue + try: + obj = json.loads(line) + except json.JSONDecodeError: + continue + if not isinstance(obj, dict): + continue + if obj.get("seq", 0) <= last_seq: + continue + events.append(obj) + + if events: + last_event = events[-1] + _save_cursor(workspace_root, consumer, { + "consumer": consumer, + "last_seq": last_event["seq"], + "last_event_id": last_event.get("event_id", ""), + "last_read": datetime.now(timezone.utc).isoformat(), + }) + + return events + + +def _load_cursor(workspace_root: Path, consumer: str) -> dict[str, object]: + cursor_file = _cursors_dir(workspace_root) / f"{consumer}.json" + if not cursor_file.exists(): + return {} + try: + return json.loads(cursor_file.read_text()) + except (json.JSONDecodeError, OSError): + return {} + + +def 
_save_cursor(workspace_root: Path, consumer: str, data: dict[str, object]) -> None: + cursors = _cursors_dir(workspace_root) + cursors.mkdir(parents=True, exist_ok=True) + cursor_file = cursors / f"{consumer}.json" + tmp = cursor_file.with_suffix(".tmp") + tmp.write_text(json.dumps(data, indent=2)) + tmp.rename(cursor_file) diff --git a/gr2/python_cli/hooks.py b/gr2/python_cli/hooks.py index 460b15f..41c4bf1 100644 --- a/gr2/python_cli/hooks.py +++ b/gr2/python_cli/hooks.py @@ -4,9 +4,12 @@ import json import sys import subprocess +import time import tomllib from pathlib import Path +from .events import emit, EventType + VALID_IF_EXISTS = {"skip", "overwrite", "merge", "error"} VALID_ON_FAILURE = {"block", "warn", "skip"} @@ -282,6 +285,18 @@ def run_lifecycle_stage( first_materialize=first_materialize, allow_manual=allow_manual, ): + emit( + event_type=EventType.HOOK_SKIPPED, + workspace_root=ctx.workspace_root, + actor="system", + owner_unit=ctx.lane_owner, + payload={ + "stage": stage, + "hook_name": hook.name, + "repo": ctx.repo_name, + "reason": f"when={hook.when} did not match current invocation", + }, + ) results.append( HookResult( kind="lifecycle", @@ -293,6 +308,20 @@ def run_lifecycle_stage( continue cwd = render_path(hook.cwd, ctx) command = render_text(hook.command, ctx) + emit( + event_type=EventType.HOOK_STARTED, + workspace_root=ctx.workspace_root, + actor="system", + owner_unit=ctx.lane_owner, + payload={ + "stage": stage, + "hook_name": hook.name, + "repo": ctx.repo_name, + "command": command, + "cwd": str(cwd), + }, + ) + t0 = time.monotonic() proc = subprocess.run( command, cwd=cwd, @@ -300,7 +329,21 @@ def run_lifecycle_stage( capture_output=True, text=True, ) + duration_ms = int((time.monotonic() - t0) * 1000) if proc.returncode == 0: + emit( + event_type=EventType.HOOK_COMPLETED, + workspace_root=ctx.workspace_root, + actor="system", + owner_unit=ctx.lane_owner, + payload={ + "stage": stage, + "hook_name": hook.name, + "repo": ctx.repo_name, 
+ "duration_ms": duration_ms, + "exit_code": 0, + }, + ) results.append( HookResult( kind="lifecycle", @@ -315,6 +358,22 @@ def run_lifecycle_stage( ) ) continue + stderr_tail = proc.stderr[-500:] if proc.stderr else "" + emit( + event_type=EventType.HOOK_FAILED, + workspace_root=ctx.workspace_root, + actor="system", + owner_unit=ctx.lane_owner, + payload={ + "stage": stage, + "hook_name": hook.name, + "repo": ctx.repo_name, + "duration_ms": duration_ms, + "exit_code": proc.returncode, + "on_failure": hook.on_failure, + "stderr_tail": stderr_tail, + }, + ) payload = { "kind": "lifecycle", "stage": stage, diff --git a/gr2/python_cli/pr.py b/gr2/python_cli/pr.py new file mode 100644 index 0000000..3ccd113 --- /dev/null +++ b/gr2/python_cli/pr.py @@ -0,0 +1,254 @@ +"""gr2 PR group orchestration. + +Implements multi-repo PR lifecycle from PR-LIFECYCLE.md: +- create_pr_group: Create linked PRs across repos with pr_group_id +- merge_pr_group: Merge all PRs in a group (stops on first failure) +- check_pr_group_status: Poll status/checks and emit change events +- record_pr_review: Record an externally-submitted review event + +The PlatformAdapter is group-unaware. This module assigns pr_group_id, +persists group metadata, and emits events per HOOK-EVENT-CONTRACT.md +section 3.2 (PR Lifecycle). 
+""" +from __future__ import annotations + +import json +import os +from pathlib import Path + +from .events import emit, EventType +from .platform import AdapterError, CreatePRRequest, PlatformAdapter + + +class PRMergeError(RuntimeError): + """Raised when a PR merge fails.""" + + def __init__(self, repo: str, pr_number: int, reason: str) -> None: + self.repo = repo + self.pr_number = pr_number + self.reason = reason + super().__init__(f"merge failed for {repo}#{pr_number}: {reason}") + + +def _pr_groups_dir(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "pr_groups" + + +def _generate_group_id() -> str: + return "pg_" + os.urandom(4).hex() + + +def _load_group(workspace_root: Path, pr_group_id: str) -> dict: + path = _pr_groups_dir(workspace_root) / f"{pr_group_id}.json" + return json.loads(path.read_text()) + + +def _save_group(workspace_root: Path, group: dict) -> None: + d = _pr_groups_dir(workspace_root) + d.mkdir(parents=True, exist_ok=True) + path = d / f"{group['pr_group_id']}.json" + path.write_text(json.dumps(group, indent=2)) + + +def create_pr_group( + workspace_root: Path, + owner_unit: str, + lane_name: str, + title: str, + base_branch: str, + head_branch: str, + repos: list[str], + adapter: PlatformAdapter, + actor: str, + *, + body: str = "", + draft: bool = False, +) -> dict: + """Create linked PRs across repos and emit pr.created.""" + pr_group_id = _generate_group_id() + prs: list[dict] = [] + + for repo in repos: + request = CreatePRRequest( + repo=repo, + title=title, + body=body, + head_branch=head_branch, + base_branch=base_branch, + draft=draft, + ) + ref = adapter.create_pr(request) + prs.append({ + "repo": repo, + "pr_number": ref.number, + "url": ref.url, + }) + + group = { + "pr_group_id": pr_group_id, + "lane_name": lane_name, + "title": title, + "base_branch": base_branch, + "head_branch": head_branch, + "prs": prs, + "status": {repo: "OPEN" for repo in repos}, + } + _save_group(workspace_root, group) + + emit( + 
event_type=EventType.PR_CREATED, + workspace_root=workspace_root, + actor=actor, + owner_unit=owner_unit, + payload={ + "pr_group_id": pr_group_id, + "lane_name": lane_name, + "repos": prs, + }, + ) + + return group + + +def merge_pr_group( + workspace_root: Path, + pr_group_id: str, + adapter: PlatformAdapter, + actor: str, +) -> dict: + """Merge all PRs in a group. Stops on first failure.""" + group = _load_group(workspace_root, pr_group_id) + merged: list[dict] = [] + + for pr_info in group["prs"]: + repo = pr_info["repo"] + number = pr_info["pr_number"] + try: + adapter.merge_pr(repo, number) + except AdapterError as exc: + emit( + event_type=EventType.PR_MERGE_FAILED, + workspace_root=workspace_root, + actor=actor, + owner_unit=group.get("owner_unit", actor), + payload={ + "pr_group_id": pr_group_id, + "repo": repo, + "pr_number": number, + "reason": str(exc), + }, + ) + raise PRMergeError(repo, number, str(exc)) from exc + merged.append(pr_info) + + emit( + event_type=EventType.PR_MERGED, + workspace_root=workspace_root, + actor=actor, + owner_unit=group.get("owner_unit", actor), + payload={ + "pr_group_id": pr_group_id, + "repos": merged, + }, + ) + + return group + + +def check_pr_group_status( + workspace_root: Path, + pr_group_id: str, + adapter: PlatformAdapter, + actor: str, +) -> dict: + """Poll PR status/checks for all repos in a group. Emit change events.""" + group = _load_group(workspace_root, pr_group_id) + cached_status = group.get("status", {}) + + for pr_info in group["prs"]: + repo = pr_info["repo"] + number = pr_info["pr_number"] + status = adapter.pr_status(repo, number) + old_state = cached_status.get(repo, "OPEN") + + # Detect state change (OPEN -> MERGED, OPEN -> CLOSED, etc.) 
+ if status.state != old_state: + emit( + event_type=EventType.PR_STATUS_CHANGED, + workspace_root=workspace_root, + actor=actor, + owner_unit=group.get("owner_unit", actor), + payload={ + "pr_group_id": pr_group_id, + "repo": repo, + "pr_number": number, + "old_status": old_state, + "new_status": status.state, + }, + ) + cached_status[repo] = status.state + + # Detect check results (only when checks are complete) + if status.checks: + completed = [c for c in status.checks if c.status == "COMPLETED"] + if completed and len(completed) == len(status.checks): + failed = [c.name for c in completed if c.conclusion != "SUCCESS"] + if failed: + emit( + event_type=EventType.PR_CHECKS_FAILED, + workspace_root=workspace_root, + actor=actor, + owner_unit=group.get("owner_unit", actor), + payload={ + "pr_group_id": pr_group_id, + "repo": repo, + "pr_number": number, + "failed_checks": failed, + }, + ) + else: + emit( + event_type=EventType.PR_CHECKS_PASSED, + workspace_root=workspace_root, + actor=actor, + owner_unit=group.get("owner_unit", actor), + payload={ + "pr_group_id": pr_group_id, + "repo": repo, + "pr_number": number, + "passed_checks": [c.name for c in completed], + }, + ) + + group["status"] = cached_status + _save_group(workspace_root, group) + return group + + +def record_pr_review( + workspace_root: Path, + pr_group_id: str, + repo: str, + pr_number: int, + reviewer: str, + state: str, + actor: str, +) -> None: + """Record an externally-submitted PR review and emit pr.review_submitted. + + Reviews come from outside gr2 (GitHub webhooks, human action, etc.). + The adapter doesn't query reviews, so this is a push-model entry point. 
+ """ + emit( + event_type=EventType.PR_REVIEW_SUBMITTED, + workspace_root=workspace_root, + actor=actor, + owner_unit=actor, + payload={ + "pr_group_id": pr_group_id, + "repo": repo, + "pr_number": pr_number, + "reviewer": reviewer, + "state": state, + }, + ) diff --git a/gr2/tests/__init__.py b/gr2/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/gr2/tests/conftest.py b/gr2/tests/conftest.py new file mode 100644 index 0000000..244cb15 --- /dev/null +++ b/gr2/tests/conftest.py @@ -0,0 +1,15 @@ +"""Shared fixtures for gr2 tests.""" +from __future__ import annotations + +import pytest +from pathlib import Path + + +@pytest.fixture +def workspace(tmp_path: Path) -> Path: + """Create a minimal workspace with .grip/ directory.""" + grip = tmp_path / ".grip" + grip.mkdir() + events = grip / "events" + events.mkdir() + return tmp_path diff --git a/gr2/tests/test_channel_bridge.py b/gr2/tests/test_channel_bridge.py new file mode 100644 index 0000000..5fa7c70 --- /dev/null +++ b/gr2/tests/test_channel_bridge.py @@ -0,0 +1,333 @@ +"""Tests for gr2 channel bridge consumer. + +Tests the event-to-channel-message mapping from HOOK-EVENT-CONTRACT.md +section 8. Written TDD-first. +""" +from __future__ import annotations + +import json +from pathlib import Path + +import pytest + + +# --------------------------------------------------------------------------- +# 1. 
format_event() message templates (section 8 mapping table) +# --------------------------------------------------------------------------- + +class TestFormatEvent: + """format_event() applies the mapping table to produce channel messages.""" + + def test_lane_created(self): + from gr2.python_cli.channel_bridge import format_event + event = { + "type": "lane.created", + "actor": "agent:apollo", + "owner_unit": "apollo", + "lane_name": "feat/hook-events", + "lane_type": "feature", + "repos": ["grip", "synapt"], + } + msg = format_event(event) + assert msg == "agent:apollo created lane feat/hook-events [feature] repos=['grip', 'synapt']" + + def test_lane_entered(self): + from gr2.python_cli.channel_bridge import format_event + event = { + "type": "lane.entered", + "actor": "agent:apollo", + "owner_unit": "apollo", + "lane_name": "feat/hook-events", + } + msg = format_event(event) + assert msg == "agent:apollo entered apollo/feat/hook-events" + + def test_lane_exited(self): + from gr2.python_cli.channel_bridge import format_event + event = { + "type": "lane.exited", + "actor": "agent:apollo", + "owner_unit": "apollo", + "lane_name": "feat/hook-events", + } + msg = format_event(event) + assert msg == "agent:apollo exited apollo/feat/hook-events" + + def test_pr_created(self): + from gr2.python_cli.channel_bridge import format_event + event = { + "type": "pr.created", + "actor": "agent:apollo", + "pr_group_id": "pg_8a3f1b2c", + "repos": [{"repo": "grip", "pr_number": 570}, {"repo": "synapt", "pr_number": 583}], + } + msg = format_event(event) + assert "pg_8a3f1b2c" in msg + assert "agent:apollo" in msg + + def test_pr_merged(self): + from gr2.python_cli.channel_bridge import format_event + event = { + "type": "pr.merged", + "actor": "agent:apollo", + "pr_group_id": "pg_8a3f1b2c", + } + msg = format_event(event) + assert msg == "agent:apollo merged PR group pg_8a3f1b2c" + + def test_pr_checks_failed(self): + from gr2.python_cli.channel_bridge import format_event + event 
= { + "type": "pr.checks_failed", + "repo": "grip", + "pr_number": 574, + "failed_checks": ["ci/test", "ci/lint"], + } + msg = format_event(event) + assert "grip#574" in msg + assert "ci/test" in msg + + def test_hook_failed_block(self): + from gr2.python_cli.channel_bridge import format_event + event = { + "type": "hook.failed", + "hook_name": "editable-install", + "repo": "synapt", + "on_failure": "block", + "stderr_tail": "pip install failed", + } + msg = format_event(event) + assert "editable-install" in msg + assert "synapt" in msg + assert "blocking" in msg + + def test_hook_failed_warn_not_mapped(self): + """hook.failed with on_failure=warn should NOT produce a channel message.""" + from gr2.python_cli.channel_bridge import format_event + event = { + "type": "hook.failed", + "hook_name": "lint", + "repo": "synapt", + "on_failure": "warn", + "stderr_tail": "lint warnings", + } + msg = format_event(event) + assert msg is None + + def test_hook_failed_skip_not_mapped(self): + """hook.failed with on_failure=skip should NOT produce a channel message.""" + from gr2.python_cli.channel_bridge import format_event + event = { + "type": "hook.failed", + "hook_name": "optional", + "repo": "synapt", + "on_failure": "skip", + "stderr_tail": "skipped", + } + msg = format_event(event) + assert msg is None + + def test_sync_conflict(self): + from gr2.python_cli.channel_bridge import format_event + event = { + "type": "sync.conflict", + "repo": "synapt", + "conflicting_files": ["src/main.py", "tests/test_core.py"], + } + msg = format_event(event) + assert "synapt" in msg + assert "src/main.py" in msg + + def test_lease_force_broken(self): + from gr2.python_cli.channel_bridge import format_event + event = { + "type": "lease.force_broken", + "lane_name": "feat/hook-events", + "broken_by": "agent:sentinel", + "reason": "stale session", + } + msg = format_event(event) + assert "feat/hook-events" in msg + assert "agent:sentinel" in msg + assert "stale session" in msg + + def 
test_failure_resolved(self): + from gr2.python_cli.channel_bridge import format_event + event = { + "type": "failure.resolved", + "resolved_by": "agent:apollo", + "operation_id": "op_9f2a3b4c", + "lane_name": "feat/hook-events", + } + msg = format_event(event) + assert "agent:apollo" in msg + assert "op_9f2a3b4c" in msg + assert "feat/hook-events" in msg + + def test_lease_reclaimed(self): + from gr2.python_cli.channel_bridge import format_event + event = { + "type": "lease.reclaimed", + "lane_name": "feat/hook-events", + "previous_holder": "agent:atlas", + } + msg = format_event(event) + assert "feat/hook-events" in msg + assert "agent:atlas" in msg + + +# --------------------------------------------------------------------------- +# 2. Unmapped events return None (section 8 exclusion list) +# --------------------------------------------------------------------------- + +class TestUnmappedEvents: + """Events not in the mapping table produce no channel message.""" + + @pytest.mark.parametrize("event_type", [ + "hook.started", + "hook.completed", + "hook.skipped", + "lease.acquired", + "lease.released", + "lease.expired", + "sync.started", + "sync.repo_updated", + "sync.repo_skipped", + "sync.completed", + "workspace.materialized", + "workspace.file_projected", + "lane.switched", + "lane.archived", + ]) + def test_unmapped_returns_none(self, event_type): + from gr2.python_cli.channel_bridge import format_event + event = {"type": event_type, "actor": "agent:test", "owner_unit": "test"} + assert format_event(event) is None + + +# --------------------------------------------------------------------------- +# 3. 
run_bridge() cursor-based consumption +# --------------------------------------------------------------------------- + +class TestRunBridge: + """run_bridge() reads events via cursor and calls post_fn for each.""" + + def test_processes_mapped_events(self, workspace: Path): + from gr2.python_cli.events import emit, EventType + from gr2.python_cli.channel_bridge import run_bridge + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + posted: list[str] = [] + run_bridge(workspace, post_fn=posted.append) + assert len(posted) == 1 + assert "agent:apollo entered apollo/feat/test" in posted[0] + + def test_skips_unmapped_events(self, workspace: Path): + from gr2.python_cli.events import emit, EventType + from gr2.python_cli.channel_bridge import run_bridge + emit( + event_type=EventType.LEASE_ACQUIRED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "mode": "edit", "ttl_seconds": 900, "lease_id": "x"}, + ) + posted: list[str] = [] + run_bridge(workspace, post_fn=posted.append) + assert len(posted) == 0 + + def test_cursor_advances(self, workspace: Path): + """Second run_bridge call returns nothing if no new events.""" + from gr2.python_cli.events import emit, EventType + from gr2.python_cli.channel_bridge import run_bridge + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + posted: list[str] = [] + run_bridge(workspace, post_fn=posted.append) + assert len(posted) == 1 + # Second call: cursor advanced, no new events + posted.clear() + run_bridge(workspace, post_fn=posted.append) + assert len(posted) == 0 + + def test_processes_only_new_events(self, workspace: Path): + from gr2.python_cli.events import emit, 
EventType + from gr2.python_cli.channel_bridge import run_bridge + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "first", "lane_type": "feature", "repos": ["grip"]}, + ) + posted: list[str] = [] + run_bridge(workspace, post_fn=posted.append) + assert len(posted) == 1 + # Emit a new event + emit( + event_type=EventType.LANE_EXITED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "first", "stashed_repos": []}, + ) + posted.clear() + run_bridge(workspace, post_fn=posted.append) + assert len(posted) == 1 + assert "exited" in posted[0] + + def test_mixed_mapped_and_unmapped(self, workspace: Path): + """Only mapped events produce messages; unmapped are silently skipped.""" + from gr2.python_cli.events import emit, EventType + from gr2.python_cli.channel_bridge import run_bridge + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + emit( + event_type=EventType.LEASE_ACQUIRED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "mode": "edit", "ttl_seconds": 900, "lease_id": "x"}, + ) + emit( + event_type=EventType.LANE_EXITED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "stashed_repos": ["grip"]}, + ) + posted: list[str] = [] + run_bridge(workspace, post_fn=posted.append) + # lane.entered and lane.exited are mapped; lease.acquired is not + assert len(posted) == 2 + assert "entered" in posted[0] + assert "exited" in posted[1] + + def test_returns_count(self, workspace: Path): + """run_bridge returns the number of messages posted.""" + from gr2.python_cli.events import emit, EventType + from gr2.python_cli.channel_bridge import run_bridge + emit( + 
event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + result = run_bridge(workspace, post_fn=lambda msg: None) + assert result == 1 diff --git a/gr2/tests/test_events.py b/gr2/tests/test_events.py new file mode 100644 index 0000000..77cdf5f --- /dev/null +++ b/gr2/tests/test_events.py @@ -0,0 +1,493 @@ +"""Tests for gr2 event system runtime. + +These tests define the contract from HOOK-EVENT-CONTRACT.md sections 3-8. +Written TDD-first: they must fail until events.py is implemented. +""" +from __future__ import annotations + +import json +import os +from datetime import datetime, timezone +from pathlib import Path + +import pytest + + +# --------------------------------------------------------------------------- +# 1. EventType enum (section 7.2) +# --------------------------------------------------------------------------- + +class TestEventTypeEnum: + """EventType enum must contain all 28 event types from the taxonomy.""" + + def test_import(self): + from gr2.python_cli.events import EventType + assert EventType is not None + + def test_lane_lifecycle_types(self): + from gr2.python_cli.events import EventType + assert EventType.LANE_CREATED == "lane.created" + assert EventType.LANE_ENTERED == "lane.entered" + assert EventType.LANE_EXITED == "lane.exited" + assert EventType.LANE_SWITCHED == "lane.switched" + assert EventType.LANE_ARCHIVED == "lane.archived" + + def test_lease_lifecycle_types(self): + from gr2.python_cli.events import EventType + assert EventType.LEASE_ACQUIRED == "lease.acquired" + assert EventType.LEASE_RELEASED == "lease.released" + assert EventType.LEASE_EXPIRED == "lease.expired" + assert EventType.LEASE_FORCE_BROKEN == "lease.force_broken" + + def test_hook_execution_types(self): + from gr2.python_cli.events import EventType + assert EventType.HOOK_STARTED == "hook.started" + assert 
EventType.HOOK_COMPLETED == "hook.completed" + assert EventType.HOOK_FAILED == "hook.failed" + assert EventType.HOOK_SKIPPED == "hook.skipped" + + def test_pr_lifecycle_types(self): + from gr2.python_cli.events import EventType + assert EventType.PR_CREATED == "pr.created" + assert EventType.PR_STATUS_CHANGED == "pr.status_changed" + assert EventType.PR_CHECKS_PASSED == "pr.checks_passed" + assert EventType.PR_CHECKS_FAILED == "pr.checks_failed" + assert EventType.PR_REVIEW_SUBMITTED == "pr.review_submitted" + assert EventType.PR_MERGED == "pr.merged" + assert EventType.PR_MERGE_FAILED == "pr.merge_failed" + + def test_sync_operation_types(self): + from gr2.python_cli.events import EventType + assert EventType.SYNC_STARTED == "sync.started" + assert EventType.SYNC_REPO_UPDATED == "sync.repo_updated" + assert EventType.SYNC_REPO_SKIPPED == "sync.repo_skipped" + assert EventType.SYNC_CONFLICT == "sync.conflict" + assert EventType.SYNC_COMPLETED == "sync.completed" + assert EventType.SYNC_CACHE_SEEDED == "sync.cache_seeded" + assert EventType.SYNC_CACHE_REFRESHED == "sync.cache_refreshed" + + def test_recovery_types(self): + from gr2.python_cli.events import EventType + assert EventType.FAILURE_RESOLVED == "failure.resolved" + assert EventType.LEASE_RECLAIMED == "lease.reclaimed" + + def test_workspace_operation_types(self): + from gr2.python_cli.events import EventType + assert EventType.WORKSPACE_MATERIALIZED == "workspace.materialized" + assert EventType.WORKSPACE_FILE_PROJECTED == "workspace.file_projected" + + def test_total_count(self): + from gr2.python_cli.events import EventType + # 5 lane + 4 lease + 4 hook + 7 PR + 7 sync + 2 recovery + 2 workspace = 31 + assert len(EventType) == 31 + + +# --------------------------------------------------------------------------- +# 2. 
emit() function (sections 4.2, 7.1) +# --------------------------------------------------------------------------- + +class TestEmit: + """emit() must produce flat JSONL events in .grip/events/outbox.jsonl.""" + + def test_creates_outbox_file(self, workspace: Path): + from gr2.python_cli.events import emit, EventType + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + assert outbox.exists() + + def test_single_json_line(self, workspace: Path): + from gr2.python_cli.events import emit, EventType + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + lines = outbox.read_text().strip().split("\n") + assert len(lines) == 1 + event = json.loads(lines[0]) + assert isinstance(event, dict) + + def test_flat_envelope(self, workspace: Path): + """Event must be flat: domain fields at top level, no nested payload.""" + from gr2.python_cli.events import emit, EventType + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + event = json.loads(outbox.read_text().strip()) + # Domain fields must be top-level + assert event["lane_name"] == "feat/test" + assert event["lane_type"] == "feature" + assert event["repos"] == ["grip"] + # No nested payload key + assert "payload" not in event + + def test_envelope_fields(self, workspace: Path): + """Envelope fields: version, event_id, seq, timestamp, type.""" + from gr2.python_cli.events import emit, EventType + emit( 
+ event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + event = json.loads(outbox.read_text().strip()) + assert event["version"] == 1 + assert event["type"] == "lane.entered" + assert "event_id" in event + assert "seq" in event + assert "timestamp" in event + + def test_event_id_format(self, workspace: Path): + """event_id must be 16-char hex from os.urandom(8).hex().""" + from gr2.python_cli.events import emit, EventType + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + event = json.loads(outbox.read_text().strip()) + event_id = event["event_id"] + assert len(event_id) == 16 + assert all(c in "0123456789abcdef" for c in event_id) + + def test_context_fields(self, workspace: Path): + """Context fields: workspace, actor, owner_unit.""" + from gr2.python_cli.events import emit, EventType + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + event = json.loads(outbox.read_text().strip()) + assert event["workspace"] == workspace.name + assert event["actor"] == "agent:apollo" + assert event["owner_unit"] == "apollo" + + def test_optional_agent_id(self, workspace: Path): + """agent_id is included when provided, absent when not.""" + from gr2.python_cli.events import emit, EventType + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + agent_id="agent_apollo_xyz789", + 
payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + event = json.loads(outbox.read_text().strip()) + assert event["agent_id"] == "agent_apollo_xyz789" + + def test_agent_id_absent_when_not_provided(self, workspace: Path): + from gr2.python_cli.events import emit, EventType + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + event = json.loads(outbox.read_text().strip()) + assert "agent_id" not in event + + def test_timestamp_is_iso8601_with_tz(self, workspace: Path): + from gr2.python_cli.events import emit, EventType + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + event = json.loads(outbox.read_text().strip()) + ts = datetime.fromisoformat(event["timestamp"]) + assert ts.tzinfo is not None + + def test_reserved_name_collision_raises(self, workspace: Path): + """Payload keys must not collide with envelope/context field names.""" + from gr2.python_cli.events import emit, EventType + with pytest.raises((ValueError, KeyError)): + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"version": 99, "lane_name": "feat/test"}, + ) + + +# --------------------------------------------------------------------------- +# 3. 
Monotonic sequence numbers (section 4.2) +# --------------------------------------------------------------------------- + +class TestSequenceNumbers: + """seq must be strictly monotonically increasing, starting at 1.""" + + def test_first_event_seq_is_1(self, workspace: Path): + from gr2.python_cli.events import emit, EventType + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + event = json.loads(outbox.read_text().strip()) + assert event["seq"] == 1 + + def test_monotonic_across_multiple_emits(self, workspace: Path): + from gr2.python_cli.events import emit, EventType + for _ in range(5): + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + lines = outbox.read_text().strip().split("\n") + seqs = [json.loads(line)["seq"] for line in lines] + assert seqs == [1, 2, 3, 4, 5] + + def test_unique_event_ids(self, workspace: Path): + from gr2.python_cli.events import emit, EventType + for _ in range(10): + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + lines = outbox.read_text().strip().split("\n") + ids = [json.loads(line)["event_id"] for line in lines] + assert len(set(ids)) == 10 + + +# --------------------------------------------------------------------------- +# 4. 
Outbox rotation (section 4.3) +# --------------------------------------------------------------------------- + +class TestOutboxRotation: + """Outbox rotates at 10MB threshold.""" + + def test_rotation_creates_timestamped_archive(self, workspace: Path): + from gr2.python_cli.events import emit, EventType, _outbox_path + outbox = _outbox_path(workspace) + # Write a large payload to push past 10MB + outbox.parent.mkdir(parents=True, exist_ok=True) + outbox.write_text("x" * (10 * 1024 * 1024 + 1)) + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + # Old file should be renamed to outbox.{timestamp}.jsonl + archives = list(outbox.parent.glob("outbox.*.jsonl")) + assert len(archives) == 1 + # New outbox should exist with the fresh event + assert outbox.exists() + event = json.loads(outbox.read_text().strip()) + assert event["type"] == "lane.entered" + + def test_seq_continues_after_rotation(self, workspace: Path): + from gr2.python_cli.events import emit, EventType, _outbox_path + outbox = _outbox_path(workspace) + outbox.parent.mkdir(parents=True, exist_ok=True) + # Write 5 fake events to set seq baseline + lines = [] + for i in range(1, 6): + lines.append(json.dumps({"seq": i, "type": "test"})) + outbox.write_text("\n".join(lines) + "\n") + # Pad to trigger rotation + with outbox.open("a") as f: + f.write("x" * (10 * 1024 * 1024)) + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + event = json.loads(outbox.read_text().strip()) + assert event["seq"] == 6 # continues from last seq + + +# --------------------------------------------------------------------------- +# 5. 
Cursor-based consumption (section 5.1) +# --------------------------------------------------------------------------- + +class TestCursorModel: + """Cursor-based reading for event consumers.""" + + def test_read_events_from_empty_outbox(self, workspace: Path): + from gr2.python_cli.events import read_events + events = read_events(workspace, "test_consumer") + assert events == [] + + def test_read_events_returns_all_for_new_consumer(self, workspace: Path): + from gr2.python_cli.events import emit, read_events, EventType + for i in range(3): + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": f"lane-{i}", "lane_type": "feature", "repos": ["grip"]}, + ) + events = read_events(workspace, "test_consumer") + assert len(events) == 3 + assert [e["lane_name"] for e in events] == ["lane-0", "lane-1", "lane-2"] + + def test_cursor_advances_after_read(self, workspace: Path): + from gr2.python_cli.events import emit, read_events, EventType + for i in range(3): + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": f"lane-{i}", "lane_type": "feature", "repos": ["grip"]}, + ) + # First read: get all 3 + events = read_events(workspace, "my_consumer") + assert len(events) == 3 + # Second read: get nothing (cursor advanced) + events = read_events(workspace, "my_consumer") + assert len(events) == 0 + + def test_cursor_only_returns_new_events(self, workspace: Path): + from gr2.python_cli.events import emit, read_events, EventType + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "first", "lane_type": "feature", "repos": ["grip"]}, + ) + read_events(workspace, "my_consumer") + # Emit more after cursor advanced + emit( + event_type=EventType.LANE_EXITED, + workspace_root=workspace, + actor="agent:apollo", + 
owner_unit="apollo", + payload={"lane_name": "first", "stashed_repos": []}, + ) + events = read_events(workspace, "my_consumer") + assert len(events) == 1 + assert events[0]["type"] == "lane.exited" + + def test_cursor_file_created(self, workspace: Path): + from gr2.python_cli.events import emit, read_events, EventType + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + read_events(workspace, "test_consumer") + cursor_file = workspace / ".grip" / "events" / "cursors" / "test_consumer.json" + assert cursor_file.exists() + cursor = json.loads(cursor_file.read_text()) + assert cursor["consumer"] == "test_consumer" + assert cursor["last_seq"] == 1 + + def test_independent_cursors(self, workspace: Path): + """Different consumers maintain independent cursors.""" + from gr2.python_cli.events import emit, read_events, EventType + for i in range(3): + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": f"lane-{i}", "lane_type": "feature", "repos": ["grip"]}, + ) + # Consumer A reads all 3 + events_a = read_events(workspace, "consumer_a") + assert len(events_a) == 3 + # Consumer B hasn't read yet, gets all 3 + events_b = read_events(workspace, "consumer_b") + assert len(events_b) == 3 + + +# --------------------------------------------------------------------------- +# 6. Outbox path helper (section 4.1) +# --------------------------------------------------------------------------- + +class TestOutboxPath: + + def test_outbox_path(self, workspace: Path): + from gr2.python_cli.events import _outbox_path + assert _outbox_path(workspace) == workspace / ".grip" / "events" / "outbox.jsonl" + + +# --------------------------------------------------------------------------- +# 7. 
emit() error handling (section 10.1) +# --------------------------------------------------------------------------- + +class TestEmitErrorHandling: + + def test_emit_does_not_raise_on_write_failure(self, workspace: Path): + """emit() logs to stderr but does not crash on write failure.""" + from gr2.python_cli.events import emit, EventType + # Make the events directory read-only to force a write failure + events_dir = workspace / ".grip" / "events" + events_dir.chmod(0o444) + try: + # Should not raise + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + finally: + events_dir.chmod(0o755) + + def test_emit_creates_events_dir_if_missing(self, workspace: Path): + """emit() creates .grip/events/ if it doesn't exist.""" + from gr2.python_cli.events import emit, EventType + events_dir = workspace / ".grip" / "events" + # Remove the events directory + events_dir.rmdir() + emit( + event_type=EventType.LANE_ENTERED, + workspace_root=workspace, + actor="agent:apollo", + owner_unit="apollo", + payload={"lane_name": "feat/test", "lane_type": "feature", "repos": ["grip"]}, + ) + assert (events_dir / "outbox.jsonl").exists() diff --git a/gr2/tests/test_hook_events.py b/gr2/tests/test_hook_events.py new file mode 100644 index 0000000..e4c6109 --- /dev/null +++ b/gr2/tests/test_hook_events.py @@ -0,0 +1,327 @@ +"""Tests for hook execution event emission. + +Verifies that run_lifecycle_stage emits hook.started, hook.completed, +hook.failed, and hook.skipped events per HOOK-EVENT-CONTRACT.md sections +3.2 (Hook Execution) and 6.2-6.4. 
+""" +from __future__ import annotations + +import json +from pathlib import Path + +import pytest + +from gr2.python_cli.hooks import ( + HookContext, + HookRuntimeError, + LifecycleHook, + RepoHooks, + run_lifecycle_stage, +) + + +def _make_ctx(workspace: Path) -> HookContext: + repo_root = workspace / "repos" / "grip" + repo_root.mkdir(parents=True, exist_ok=True) + return HookContext( + workspace_root=workspace, + lane_root=workspace / "lanes" / "apollo" / "feat-test", + repo_root=repo_root, + repo_name="grip", + lane_owner="apollo", + lane_subject="grip", + lane_name="feat/test", + ) + + +def _make_hooks(lifecycle_hooks: list[LifecycleHook], stage: str = "on_enter") -> RepoHooks: + kwargs = {"on_materialize": [], "on_enter": [], "on_exit": []} + kwargs[stage] = lifecycle_hooks + return RepoHooks( + repo_name="grip", + file_links=[], + file_copies=[], + policy={}, + path=Path("/fake/.gr2/hooks.toml"), + **kwargs, + ) + + +def _read_outbox(workspace: Path) -> list[dict]: + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + if not outbox.exists(): + return [] + lines = outbox.read_text().strip().split("\n") + return [json.loads(line) for line in lines if line.strip()] + + +# --------------------------------------------------------------------------- +# 1. 
hook.completed (successful hook) +# --------------------------------------------------------------------------- + +class TestHookCompleted: + + def test_emits_started_and_completed(self, workspace: Path): + """Successful hook emits hook.started then hook.completed.""" + ctx = _make_ctx(workspace) + hook = LifecycleHook( + stage="on_enter", name="check-version", command="true", + cwd=str(ctx.repo_root), when="always", on_failure="block", + ) + hooks = _make_hooks([hook]) + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + types = [e["type"] for e in events] + assert types == ["hook.started", "hook.completed"] + + def test_started_payload(self, workspace: Path): + ctx = _make_ctx(workspace) + hook = LifecycleHook( + stage="on_enter", name="check-version", command="echo hello", + cwd=str(ctx.repo_root), when="always", on_failure="block", + ) + hooks = _make_hooks([hook]) + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + started = events[0] + assert started["type"] == "hook.started" + assert started["stage"] == "on_enter" + assert started["hook_name"] == "check-version" + assert started["repo"] == "grip" + assert "command" in started + assert "cwd" in started + + def test_completed_payload(self, workspace: Path): + ctx = _make_ctx(workspace) + hook = LifecycleHook( + stage="on_enter", name="check-version", command="true", + cwd=str(ctx.repo_root), when="always", on_failure="block", + ) + hooks = _make_hooks([hook]) + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + completed = events[1] + assert completed["type"] == "hook.completed" + assert completed["stage"] == "on_enter" + assert completed["hook_name"] == "check-version" + assert completed["repo"] == "grip" + assert completed["exit_code"] == 0 + assert "duration_ms" in completed + assert 
isinstance(completed["duration_ms"], int) + + +# --------------------------------------------------------------------------- +# 2. hook.failed with on_failure="block" +# --------------------------------------------------------------------------- + +class TestHookFailedBlock: + + def test_emits_started_and_failed(self, workspace: Path): + ctx = _make_ctx(workspace) + hook = LifecycleHook( + stage="on_enter", name="install-deps", command="false", + cwd=str(ctx.repo_root), when="always", on_failure="block", + ) + hooks = _make_hooks([hook]) + with pytest.raises(HookRuntimeError): + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + types = [e["type"] for e in events] + assert types == ["hook.started", "hook.failed"] + + def test_failed_payload(self, workspace: Path): + ctx = _make_ctx(workspace) + hook = LifecycleHook( + stage="on_enter", name="install-deps", command="echo bad >&2; false", + cwd=str(ctx.repo_root), when="always", on_failure="block", + ) + hooks = _make_hooks([hook]) + with pytest.raises(HookRuntimeError): + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + failed = events[1] + assert failed["type"] == "hook.failed" + assert failed["stage"] == "on_enter" + assert failed["hook_name"] == "install-deps" + assert failed["repo"] == "grip" + assert failed["exit_code"] != 0 + assert failed["on_failure"] == "block" + assert "duration_ms" in failed + assert "stderr_tail" in failed + + +# --------------------------------------------------------------------------- +# 3. 
hook.failed with on_failure="warn" +# --------------------------------------------------------------------------- + +class TestHookFailedWarn: + + def test_emits_started_and_failed(self, workspace: Path): + ctx = _make_ctx(workspace) + hook = LifecycleHook( + stage="on_enter", name="lint", command="false", + cwd=str(ctx.repo_root), when="always", on_failure="warn", + ) + hooks = _make_hooks([hook]) + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + types = [e["type"] for e in events] + assert types == ["hook.started", "hook.failed"] + + def test_failed_payload_on_failure_warn(self, workspace: Path): + ctx = _make_ctx(workspace) + hook = LifecycleHook( + stage="on_enter", name="lint", command="false", + cwd=str(ctx.repo_root), when="always", on_failure="warn", + ) + hooks = _make_hooks([hook]) + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + failed = events[1] + assert failed["on_failure"] == "warn" + + +# --------------------------------------------------------------------------- +# 4. 
hook.failed with on_failure="skip" +# --------------------------------------------------------------------------- + +class TestHookFailedSkip: + + def test_emits_started_and_failed(self, workspace: Path): + ctx = _make_ctx(workspace) + hook = LifecycleHook( + stage="on_enter", name="optional", command="false", + cwd=str(ctx.repo_root), when="always", on_failure="skip", + ) + hooks = _make_hooks([hook]) + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + types = [e["type"] for e in events] + assert types == ["hook.started", "hook.failed"] + + def test_failed_payload_on_failure_skip(self, workspace: Path): + ctx = _make_ctx(workspace) + hook = LifecycleHook( + stage="on_enter", name="optional", command="false", + cwd=str(ctx.repo_root), when="always", on_failure="skip", + ) + hooks = _make_hooks([hook]) + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + failed = events[1] + assert failed["on_failure"] == "skip" + + +# --------------------------------------------------------------------------- +# 5. 
hook.skipped (when condition not met) +# --------------------------------------------------------------------------- + +class TestHookSkipped: + + def test_emits_skipped(self, workspace: Path): + ctx = _make_ctx(workspace) + hook = LifecycleHook( + stage="on_enter", name="first-only", command="true", + cwd=str(ctx.repo_root), when="first_materialize", on_failure="block", + ) + hooks = _make_hooks([hook]) + # first_materialize=False -> when=first_materialize does not match + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + assert len(events) == 1 + assert events[0]["type"] == "hook.skipped" + + def test_skipped_payload(self, workspace: Path): + ctx = _make_ctx(workspace) + hook = LifecycleHook( + stage="on_enter", name="first-only", command="true", + cwd=str(ctx.repo_root), when="first_materialize", on_failure="block", + ) + hooks = _make_hooks([hook]) + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + skipped = events[0] + assert skipped["hook_name"] == "first-only" + assert skipped["repo"] == "grip" + assert skipped["stage"] == "on_enter" + assert "reason" in skipped + + def test_skipped_no_started_event(self, workspace: Path): + """Skipped hooks must NOT emit hook.started.""" + ctx = _make_ctx(workspace) + hook = LifecycleHook( + stage="on_enter", name="first-only", command="true", + cwd=str(ctx.repo_root), when="first_materialize", on_failure="block", + ) + hooks = _make_hooks([hook]) + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + types = [e["type"] for e in events] + assert "hook.started" not in types + + +# --------------------------------------------------------------------------- +# 6. 
Multiple hooks in sequence +# --------------------------------------------------------------------------- + +class TestMultipleHooks: + + def test_two_hooks_both_succeed(self, workspace: Path): + ctx = _make_ctx(workspace) + hooks = _make_hooks([ + LifecycleHook( + stage="on_enter", name="hook-a", command="true", + cwd=str(ctx.repo_root), when="always", on_failure="block", + ), + LifecycleHook( + stage="on_enter", name="hook-b", command="true", + cwd=str(ctx.repo_root), when="always", on_failure="block", + ), + ]) + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + types = [e["type"] for e in events] + assert types == [ + "hook.started", "hook.completed", + "hook.started", "hook.completed", + ] + assert events[0]["hook_name"] == "hook-a" + assert events[2]["hook_name"] == "hook-b" + + def test_second_hook_skipped_first_succeeds(self, workspace: Path): + ctx = _make_ctx(workspace) + hooks = _make_hooks([ + LifecycleHook( + stage="on_enter", name="always-hook", command="true", + cwd=str(ctx.repo_root), when="always", on_failure="block", + ), + LifecycleHook( + stage="on_enter", name="dirty-only", command="true", + cwd=str(ctx.repo_root), when="dirty", on_failure="block", + ), + ]) + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + types = [e["type"] for e in events] + assert types == ["hook.started", "hook.completed", "hook.skipped"] + + +# --------------------------------------------------------------------------- +# 7. 
stderr_tail truncation (section 6.3) +# --------------------------------------------------------------------------- + +class TestStderrTail: + + def test_stderr_tail_truncated_to_500_bytes(self, workspace: Path): + ctx = _make_ctx(workspace) + # Generate > 500 bytes of stderr + long_stderr_cmd = "python3 -c \"import sys; sys.stderr.write('x' * 1000)\"; false" + hook = LifecycleHook( + stage="on_enter", name="noisy", command=long_stderr_cmd, + cwd=str(ctx.repo_root), when="always", on_failure="warn", + ) + hooks = _make_hooks([hook]) + run_lifecycle_stage(hooks, "on_enter", ctx, repo_dirty=False, first_materialize=False) + events = _read_outbox(workspace) + failed = [e for e in events if e["type"] == "hook.failed"][0] + assert len(failed["stderr_tail"]) <= 500 diff --git a/gr2/tests/test_pr_events.py b/gr2/tests/test_pr_events.py new file mode 100644 index 0000000..027df1d --- /dev/null +++ b/gr2/tests/test_pr_events.py @@ -0,0 +1,510 @@ +"""Tests for PR lifecycle event emission. + +Verifies that pr.py emits pr.created, pr.merged, pr.merge_failed, +pr.status_changed, pr.checks_passed, pr.checks_failed, and +pr.review_submitted events per HOOK-EVENT-CONTRACT.md section 3.2 +(PR Lifecycle) and PR-LIFECYCLE.md. + +Uses a FakeAdapter to avoid real GitHub calls. +""" +from __future__ import annotations + +import json +from pathlib import Path + +import pytest + +from gr2.python_cli.platform import ( + AdapterError, + CreatePRRequest, + PRCheck, + PRRef, + PRStatus, +) + + +class FakeAdapter: + """Test double for PlatformAdapter. 
Records calls, returns canned data.""" + + name = "fake" + + def __init__(self) -> None: + self.created: list[CreatePRRequest] = [] + self.merged: list[tuple[str, int]] = [] + self.statuses: dict[tuple[str, int], PRStatus] = {} + self._fail_merge: set[tuple[str, int]] = set() + + def create_pr(self, request: CreatePRRequest) -> PRRef: + self.created.append(request) + n = len(self.created) + 100 + return PRRef( + repo=request.repo, + number=n, + url=f"https://github.com/test/{request.repo}/pull/{n}", + head_branch=request.head_branch, + base_branch=request.base_branch, + title=request.title, + ) + + def merge_pr(self, repo: str, number: int) -> PRRef: + if (repo, number) in self._fail_merge: + raise AdapterError(f"merge conflict in {repo}#{number}") + self.merged.append((repo, number)) + return PRRef(repo=repo, number=number) + + def pr_status(self, repo: str, number: int) -> PRStatus: + key = (repo, number) + if key in self.statuses: + return self.statuses[key] + return PRStatus( + ref=PRRef(repo=repo, number=number), + state="OPEN", + checks=[], + ) + + def list_prs(self, repo: str, *, head_branch: str | None = None) -> list[PRRef]: + return [] + + def pr_checks(self, repo: str, number: int) -> list[PRCheck]: + return self.pr_status(repo, number).checks + + def set_fail_merge(self, repo: str, number: int) -> None: + self._fail_merge.add((repo, number)) + + def set_status(self, repo: str, number: int, status: PRStatus) -> None: + self.statuses[(repo, number)] = status + + +def _read_outbox(workspace: Path) -> list[dict]: + outbox = workspace / ".grip" / "events" / "outbox.jsonl" + if not outbox.exists(): + return [] + lines = outbox.read_text().strip().split("\n") + return [json.loads(line) for line in lines if line.strip()] + + +def _events_of_type(workspace: Path, event_type: str) -> list[dict]: + return [e for e in _read_outbox(workspace) if e["type"] == event_type] + + +# --------------------------------------------------------------------------- +# 1. 
pr.created (section 3.2, PR-LIFECYCLE.md section 3.1) +# --------------------------------------------------------------------------- + +class TestPRCreated: + + def test_emits_pr_created(self, workspace: Path): + from gr2.python_cli.pr import create_pr_group + adapter = FakeAdapter() + result = create_pr_group( + workspace_root=workspace, + owner_unit="apollo", + lane_name="feat/hook-events", + title="feat: hook events", + base_branch="sprint-21", + head_branch="test/event-system-runtime", + repos=["grip", "synapt"], + adapter=adapter, + actor="agent:apollo", + ) + events = _events_of_type(workspace, "pr.created") + assert len(events) == 1 + + def test_pr_created_payload(self, workspace: Path): + from gr2.python_cli.pr import create_pr_group + adapter = FakeAdapter() + result = create_pr_group( + workspace_root=workspace, + owner_unit="apollo", + lane_name="feat/hook-events", + title="feat: hook events", + base_branch="sprint-21", + head_branch="test/event-system-runtime", + repos=["grip", "synapt"], + adapter=adapter, + actor="agent:apollo", + ) + event = _events_of_type(workspace, "pr.created")[0] + assert "pr_group_id" in event + assert isinstance(event["repos"], list) + assert len(event["repos"]) == 2 + for pr in event["repos"]: + assert "repo" in pr + assert "pr_number" in pr + assert "url" in pr + + def test_pr_group_id_format(self, workspace: Path): + from gr2.python_cli.pr import create_pr_group + adapter = FakeAdapter() + result = create_pr_group( + workspace_root=workspace, + owner_unit="apollo", + lane_name="feat/hook-events", + title="feat: hook events", + base_branch="sprint-21", + head_branch="test/event-system-runtime", + repos=["grip"], + adapter=adapter, + actor="agent:apollo", + ) + event = _events_of_type(workspace, "pr.created")[0] + gid = event["pr_group_id"] + assert gid.startswith("pg_") + assert len(gid) == 11 # pg_ + 8 hex chars + assert all(c in "0123456789abcdef" for c in gid[3:]) + + def test_pr_group_metadata_stored(self, workspace: 
Path): + from gr2.python_cli.pr import create_pr_group + adapter = FakeAdapter() + result = create_pr_group( + workspace_root=workspace, + owner_unit="apollo", + lane_name="feat/hook-events", + title="feat: hook events", + base_branch="sprint-21", + head_branch="test/event-system-runtime", + repos=["grip"], + adapter=adapter, + actor="agent:apollo", + ) + gid = result["pr_group_id"] + meta_path = workspace / ".grip" / "pr_groups" / f"{gid}.json" + assert meta_path.exists() + meta = json.loads(meta_path.read_text()) + assert meta["pr_group_id"] == gid + assert meta["lane_name"] == "feat/hook-events" + + def test_calls_adapter_per_repo(self, workspace: Path): + from gr2.python_cli.pr import create_pr_group + adapter = FakeAdapter() + create_pr_group( + workspace_root=workspace, + owner_unit="apollo", + lane_name="feat/hook-events", + title="feat: hook events", + base_branch="sprint-21", + head_branch="test/event-system-runtime", + repos=["grip", "synapt", "synapt-private"], + adapter=adapter, + actor="agent:apollo", + ) + assert len(adapter.created) == 3 + assert [r.repo for r in adapter.created] == ["grip", "synapt", "synapt-private"] + + +# --------------------------------------------------------------------------- +# 2. 
pr.merged (section 3.2, PR-LIFECYCLE.md section 3.3) +# --------------------------------------------------------------------------- + +class TestPRMerged: + + def _create_group(self, workspace: Path, adapter: FakeAdapter, repos: list[str] | None = None) -> dict: + from gr2.python_cli.pr import create_pr_group + return create_pr_group( + workspace_root=workspace, + owner_unit="apollo", + lane_name="feat/test", + title="feat: test", + base_branch="sprint-21", + head_branch="feat/test", + repos=repos or ["grip", "synapt"], + adapter=adapter, + actor="agent:apollo", + ) + + def test_emits_pr_merged(self, workspace: Path): + from gr2.python_cli.pr import merge_pr_group + adapter = FakeAdapter() + group = self._create_group(workspace, adapter) + merge_pr_group( + workspace_root=workspace, + pr_group_id=group["pr_group_id"], + adapter=adapter, + actor="agent:apollo", + ) + events = _events_of_type(workspace, "pr.merged") + assert len(events) == 1 + + def test_pr_merged_payload(self, workspace: Path): + from gr2.python_cli.pr import merge_pr_group + adapter = FakeAdapter() + group = self._create_group(workspace, adapter) + merge_pr_group( + workspace_root=workspace, + pr_group_id=group["pr_group_id"], + adapter=adapter, + actor="agent:apollo", + ) + event = _events_of_type(workspace, "pr.merged")[0] + assert event["pr_group_id"] == group["pr_group_id"] + assert isinstance(event["repos"], list) + assert len(event["repos"]) == 2 + + def test_merges_in_repo_order(self, workspace: Path): + from gr2.python_cli.pr import merge_pr_group + adapter = FakeAdapter() + group = self._create_group(workspace, adapter, repos=["grip", "synapt", "synapt-private"]) + merge_pr_group( + workspace_root=workspace, + pr_group_id=group["pr_group_id"], + adapter=adapter, + actor="agent:apollo", + ) + assert [r for r, _ in adapter.merged] == ["grip", "synapt", "synapt-private"] + + +# --------------------------------------------------------------------------- +# 3. 
pr.merge_failed (section 3.2, PR-LIFECYCLE.md section 4.4) +# --------------------------------------------------------------------------- + +class TestPRMergeFailed: + + def _create_group(self, workspace: Path, adapter: FakeAdapter) -> dict: + from gr2.python_cli.pr import create_pr_group + return create_pr_group( + workspace_root=workspace, + owner_unit="apollo", + lane_name="feat/test", + title="feat: test", + base_branch="sprint-21", + head_branch="feat/test", + repos=["grip", "synapt"], + adapter=adapter, + actor="agent:apollo", + ) + + def test_emits_merge_failed(self, workspace: Path): + from gr2.python_cli.pr import merge_pr_group, PRMergeError + adapter = FakeAdapter() + group = self._create_group(workspace, adapter) + # Make synapt fail + synapt_pr = [p for p in group["prs"] if p["repo"] == "synapt"][0] + adapter.set_fail_merge("synapt", synapt_pr["pr_number"]) + with pytest.raises(PRMergeError): + merge_pr_group( + workspace_root=workspace, + pr_group_id=group["pr_group_id"], + adapter=adapter, + actor="agent:apollo", + ) + events = _events_of_type(workspace, "pr.merge_failed") + assert len(events) == 1 + + def test_merge_failed_payload(self, workspace: Path): + from gr2.python_cli.pr import merge_pr_group, PRMergeError + adapter = FakeAdapter() + group = self._create_group(workspace, adapter) + synapt_pr = [p for p in group["prs"] if p["repo"] == "synapt"][0] + adapter.set_fail_merge("synapt", synapt_pr["pr_number"]) + with pytest.raises(PRMergeError): + merge_pr_group( + workspace_root=workspace, + pr_group_id=group["pr_group_id"], + adapter=adapter, + actor="agent:apollo", + ) + event = _events_of_type(workspace, "pr.merge_failed")[0] + assert event["pr_group_id"] == group["pr_group_id"] + assert event["repo"] == "synapt" + assert "reason" in event + + def test_stops_after_first_failure(self, workspace: Path): + """Merge stops at first failure; remaining repos are not attempted.""" + from gr2.python_cli.pr import create_pr_group, merge_pr_group, 
PRMergeError + adapter = FakeAdapter() + group = create_pr_group( + workspace_root=workspace, + owner_unit="apollo", + lane_name="feat/test", + title="feat: test", + base_branch="sprint-21", + head_branch="feat/test", + repos=["grip", "synapt", "synapt-private"], + adapter=adapter, + actor="agent:apollo", + ) + # Make grip (first repo) fail + grip_pr = [p for p in group["prs"] if p["repo"] == "grip"][0] + adapter.set_fail_merge("grip", grip_pr["pr_number"]) + with pytest.raises(PRMergeError): + merge_pr_group( + workspace_root=workspace, + pr_group_id=group["pr_group_id"], + adapter=adapter, + actor="agent:apollo", + ) + # Only grip was attempted; synapt and synapt-private were not + assert len(adapter.merged) == 0 # grip failed, not in merged list + assert len(_events_of_type(workspace, "pr.merged")) == 0 + + +# --------------------------------------------------------------------------- +# 4. pr.status_changed, pr.checks_passed, pr.checks_failed +# --------------------------------------------------------------------------- + +class TestPRStatusEvents: + + def _create_group(self, workspace: Path, adapter: FakeAdapter) -> dict: + from gr2.python_cli.pr import create_pr_group + return create_pr_group( + workspace_root=workspace, + owner_unit="apollo", + lane_name="feat/test", + title="feat: test", + base_branch="sprint-21", + head_branch="feat/test", + repos=["grip"], + adapter=adapter, + actor="agent:apollo", + ) + + def test_checks_passed_emitted(self, workspace: Path): + from gr2.python_cli.pr import create_pr_group, check_pr_group_status + adapter = FakeAdapter() + group = self._create_group(workspace, adapter) + grip_pr = group["prs"][0] + # Set checks to all passing + adapter.set_status("grip", grip_pr["pr_number"], PRStatus( + ref=PRRef(repo="grip", number=grip_pr["pr_number"]), + state="OPEN", + checks=[ + PRCheck(name="ci/test", status="COMPLETED", conclusion="SUCCESS"), + PRCheck(name="ci/lint", status="COMPLETED", conclusion="SUCCESS"), + ], + )) + 
check_pr_group_status( + workspace_root=workspace, + pr_group_id=group["pr_group_id"], + adapter=adapter, + actor="agent:apollo", + ) + events = _events_of_type(workspace, "pr.checks_passed") + assert len(events) == 1 + assert events[0]["repo"] == "grip" + assert events[0]["pr_group_id"] == group["pr_group_id"] + + def test_checks_failed_emitted(self, workspace: Path): + from gr2.python_cli.pr import create_pr_group, check_pr_group_status + adapter = FakeAdapter() + group = self._create_group(workspace, adapter) + grip_pr = group["prs"][0] + adapter.set_status("grip", grip_pr["pr_number"], PRStatus( + ref=PRRef(repo="grip", number=grip_pr["pr_number"]), + state="OPEN", + checks=[ + PRCheck(name="ci/test", status="COMPLETED", conclusion="FAILURE"), + PRCheck(name="ci/lint", status="COMPLETED", conclusion="SUCCESS"), + ], + )) + check_pr_group_status( + workspace_root=workspace, + pr_group_id=group["pr_group_id"], + adapter=adapter, + actor="agent:apollo", + ) + events = _events_of_type(workspace, "pr.checks_failed") + assert len(events) == 1 + assert events[0]["repo"] == "grip" + assert "ci/test" in events[0]["failed_checks"] + + def test_status_changed_emitted(self, workspace: Path): + from gr2.python_cli.pr import create_pr_group, check_pr_group_status + adapter = FakeAdapter() + group = self._create_group(workspace, adapter) + grip_pr = group["prs"][0] + adapter.set_status("grip", grip_pr["pr_number"], PRStatus( + ref=PRRef(repo="grip", number=grip_pr["pr_number"]), + state="MERGED", + checks=[], + )) + check_pr_group_status( + workspace_root=workspace, + pr_group_id=group["pr_group_id"], + adapter=adapter, + actor="agent:apollo", + ) + events = _events_of_type(workspace, "pr.status_changed") + assert len(events) == 1 + assert events[0]["repo"] == "grip" + assert events[0]["new_status"] == "MERGED" + + def test_no_event_when_status_unchanged(self, workspace: Path): + """Second status check with no changes emits no events.""" + from gr2.python_cli.pr import 
create_pr_group, check_pr_group_status + adapter = FakeAdapter() + group = self._create_group(workspace, adapter) + # Default status is OPEN with no checks -- first check caches it + check_pr_group_status( + workspace_root=workspace, + pr_group_id=group["pr_group_id"], + adapter=adapter, + actor="agent:apollo", + ) + events_before = len(_read_outbox(workspace)) + # Second check, same status + check_pr_group_status( + workspace_root=workspace, + pr_group_id=group["pr_group_id"], + adapter=adapter, + actor="agent:apollo", + ) + events_after = len(_read_outbox(workspace)) + # No new status_changed events + assert events_after == events_before + + +# --------------------------------------------------------------------------- +# 5. pr.review_submitted +# --------------------------------------------------------------------------- + +class TestPRReviewSubmitted: + + def _create_group(self, workspace: Path, adapter: FakeAdapter) -> dict: + from gr2.python_cli.pr import create_pr_group + return create_pr_group( + workspace_root=workspace, + owner_unit="apollo", + lane_name="feat/test", + title="feat: test", + base_branch="sprint-21", + head_branch="feat/test", + repos=["grip"], + adapter=adapter, + actor="agent:apollo", + ) + + def test_review_event_emitted(self, workspace: Path): + from gr2.python_cli.pr import record_pr_review + adapter = FakeAdapter() + group = self._create_group(workspace, adapter) + record_pr_review( + workspace_root=workspace, + pr_group_id=group["pr_group_id"], + repo="grip", + pr_number=group["prs"][0]["pr_number"], + reviewer="agent:sentinel", + state="APPROVED", + actor="agent:sentinel", + ) + events = _events_of_type(workspace, "pr.review_submitted") + assert len(events) == 1 + + def test_review_payload(self, workspace: Path): + from gr2.python_cli.pr import record_pr_review + adapter = FakeAdapter() + group = self._create_group(workspace, adapter) + record_pr_review( + workspace_root=workspace, + pr_group_id=group["pr_group_id"], + repo="grip", 
+ pr_number=group["prs"][0]["pr_number"], + reviewer="agent:sentinel", + state="CHANGES_REQUESTED", + actor="agent:sentinel", + ) + event = _events_of_type(workspace, "pr.review_submitted")[0] + assert event["pr_group_id"] == group["pr_group_id"] + assert event["repo"] == "grip" + assert event["pr_number"] == group["prs"][0]["pr_number"] + assert event["reviewer"] == "agent:sentinel" + assert event["state"] == "CHANGES_REQUESTED" From 926f35fa631247a3bc0b5b3569411c35d8ab27c6 Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 12:20:09 -0500 Subject: [PATCH 08/18] feat: start sprint 21 sync and platform adapter lane --- gr2/python_cli/app.py | 180 +++++++++++++++++++ gr2/python_cli/gitops.py | 15 ++ gr2/python_cli/syncops.py | 35 ++++ gr2/tests/test_sprint21_sync_platform.py | 218 +++++++++++++++++++++++ 4 files changed, 448 insertions(+) create mode 100644 gr2/tests/test_sprint21_sync_platform.py diff --git a/gr2/python_cli/app.py b/gr2/python_cli/app.py index 0fa3843..663c631 100644 --- a/gr2/python_cli/app.py +++ b/gr2/python_cli/app.py @@ -3,6 +3,7 @@ import contextlib import io import json +import os from pathlib import Path from types import SimpleNamespace from typing import Optional @@ -25,6 +26,7 @@ ) from .events import emit, EventType from .hooks import HookContext, apply_file_projections, load_repo_hooks, run_lifecycle_stage +from .platform import CreatePRRequest, PRRef, get_platform_adapter from . 
import spec_apply from gr2.prototypes import lane_workspace_prototype as lane_proto from gr2.prototypes import repo_maintenance_prototype as repo_proto @@ -37,6 +39,7 @@ lane_app = typer.Typer(help="Lane creation and navigation") lease_app = typer.Typer(help="Lane lease operations") review_app = typer.Typer(help="Review and reviewer requirement operations") +pr_app = typer.Typer(help="Cross-repo PR orchestration") workspace_app = typer.Typer(help="Workspace bootstrap and materialization") spec_app = typer.Typer(help="Declarative workspace spec operations") exec_app = typer.Typer(help="Lane-aware execution planning and execution") @@ -46,6 +49,7 @@ app.add_typer(lane_app, name="lane") lane_app.add_typer(lease_app, name="lease") app.add_typer(review_app, name="review") +app.add_typer(pr_app, name="pr") app.add_typer(workspace_app, name="workspace") app.add_typer(spec_app, name="spec") app.add_typer(exec_app, name="exec") @@ -196,6 +200,46 @@ def _repo_hook_context(workspace_root: Path, repo_root: Path) -> HookContext: ) +def _resolve_lane_name(workspace_root: Path, owner_unit: str, lane_name: Optional[str]) -> str: + if lane_name: + return lane_name + current_doc = lane_proto.load_current_lane_doc(workspace_root, owner_unit) + return str(current_doc["current"]["lane_name"]) + + +def _pr_groups_root(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "pr_groups" + + +def _pr_group_path(workspace_root: Path, owner_unit: str, lane_name: str) -> Path: + return _pr_groups_root(workspace_root) / owner_unit / f"{lane_name}.json" + + +def _write_pr_group(workspace_root: Path, owner_unit: str, lane_name: str, payload: dict[str, object]) -> Path: + path = _pr_group_path(workspace_root, owner_unit, lane_name) + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(payload, indent=2) + "\n") + return path + + +def _load_pr_group(workspace_root: Path, owner_unit: str, lane_name: str) -> dict[str, object]: + path = 
_pr_group_path(workspace_root, owner_unit, lane_name) + if not path.exists(): + raise SystemExit(f"pr group not found for {owner_unit}/{lane_name}: {path}") + return json.loads(path.read_text()) + + +def _repo_slug_from_url(url: str, fallback_name: str) -> str: + cleaned = url.strip() + if cleaned.startswith("git@github.com:"): + slug = cleaned.split("git@github.com:", 1)[1] + return slug.removesuffix(".git") + if cleaned.startswith("https://github.com/"): + slug = cleaned.split("https://github.com/", 1)[1] + return slug.removesuffix(".git") + return fallback_name + + def _write_workspace_spec(workspace_root: Path, repos: list[dict[str, str]], default_unit: str) -> Path: spec_path = _workspace_spec_path(workspace_root) spec_path.parent.mkdir(parents=True, exist_ok=True) @@ -873,5 +917,141 @@ def review_checkout_pr( typer.echo(json.dumps(payload, indent=2)) +@pr_app.command("create") +def pr_create( + workspace_root: Path, + owner_unit: str, + lane_name: Optional[str] = typer.Argument(None, help="Lane name. 
Defaults to the unit's current lane."), + platform: str = typer.Option("github", "--platform", help="Platform adapter name"), + base_branch: str = typer.Option("main", "--base", help="Base branch for created PRs"), + draft: bool = typer.Option(False, "--draft", help="Create PRs as drafts"), + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Create a grouped set of per-repo PRs for a lane.""" + workspace_root = workspace_root.resolve() + resolved_lane = _resolve_lane_name(workspace_root, owner_unit, lane_name) + lane_doc = lane_proto.load_lane_doc(workspace_root, owner_unit, resolved_lane) + spec = lane_proto.load_workspace_spec(workspace_root) + adapter = get_platform_adapter(platform) + pr_group_id = f"pg_{os.urandom(4).hex()}" + refs: list[dict[str, object]] = [] + branch_map = dict(lane_doc.get("branch_map", {})) + for repo_name in lane_doc.get("repos", []): + repo_spec = next(repo for repo in spec.get("repos", []) if repo.get("name") == repo_name) + request = CreatePRRequest( + repo=_repo_slug_from_url(str(repo_spec.get("url", "")), repo_name), + title=resolved_lane, + body=f"gr2 PR group {pr_group_id} for {owner_unit}/{resolved_lane}", + head_branch=str(branch_map.get(repo_name, resolved_lane)), + base_branch=base_branch, + draft=draft, + ) + ref = adapter.create_pr(request) + refs.append(ref.as_dict()) + payload = { + "pr_group_id": pr_group_id, + "owner_unit": owner_unit, + "lane_name": resolved_lane, + "platform": platform, + "refs": refs, + } + path = _write_pr_group(workspace_root, owner_unit, resolved_lane, payload) + payload["state_path"] = str(path) + if json_output: + typer.echo(json.dumps(payload, indent=2)) + else: + typer.echo(json.dumps(payload, indent=2)) + + +@pr_app.command("status") +def pr_status( + workspace_root: Path, + owner_unit: str, + lane_name: Optional[str] = typer.Argument(None, help="Lane name. 
Defaults to the unit's current lane."), + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Show grouped PR status for a lane.""" + workspace_root = workspace_root.resolve() + resolved_lane = _resolve_lane_name(workspace_root, owner_unit, lane_name) + group = _load_pr_group(workspace_root, owner_unit, resolved_lane) + adapter = get_platform_adapter(str(group.get("platform", "github"))) + statuses = [] + for ref_doc in group.get("refs", []): + ref = PRRef(**ref_doc) + statuses.append(adapter.pr_status(ref.repo, int(ref.number)).as_dict()) + payload = { + "pr_group_id": group["pr_group_id"], + "owner_unit": owner_unit, + "lane_name": resolved_lane, + "statuses": statuses, + } + if json_output: + typer.echo(json.dumps(payload, indent=2)) + else: + typer.echo(json.dumps(payload, indent=2)) + + +@pr_app.command("checks") +def pr_checks( + workspace_root: Path, + owner_unit: str, + lane_name: Optional[str] = typer.Argument(None, help="Lane name. 
Defaults to the unit's current lane."), + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Show grouped PR checks for a lane.""" + workspace_root = workspace_root.resolve() + resolved_lane = _resolve_lane_name(workspace_root, owner_unit, lane_name) + group = _load_pr_group(workspace_root, owner_unit, resolved_lane) + adapter = get_platform_adapter(str(group.get("platform", "github"))) + rows = [] + for ref_doc in group.get("refs", []): + ref = PRRef(**ref_doc) + rows.append( + { + "repo": ref.repo, + "number": ref.number, + "checks": [item.as_dict() for item in adapter.pr_checks(ref.repo, int(ref.number))], + } + ) + payload = { + "pr_group_id": group["pr_group_id"], + "owner_unit": owner_unit, + "lane_name": resolved_lane, + "checks": rows, + } + if json_output: + typer.echo(json.dumps(payload, indent=2)) + else: + typer.echo(json.dumps(payload, indent=2)) + + +@pr_app.command("merge") +def pr_merge( + workspace_root: Path, + owner_unit: str, + lane_name: Optional[str] = typer.Argument(None, help="Lane name. 
Defaults to the unit's current lane."), + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Merge grouped PRs for a lane.""" + workspace_root = workspace_root.resolve() + resolved_lane = _resolve_lane_name(workspace_root, owner_unit, lane_name) + group = _load_pr_group(workspace_root, owner_unit, resolved_lane) + adapter = get_platform_adapter(str(group.get("platform", "github"))) + merged = [] + for ref_doc in group.get("refs", []): + ref = PRRef(**ref_doc) + merged.append(adapter.merge_pr(ref.repo, int(ref.number)).as_dict()) + payload = { + "pr_group_id": group["pr_group_id"], + "owner_unit": owner_unit, + "lane_name": resolved_lane, + "merged": merged, + } + if json_output: + typer.echo(json.dumps(payload, indent=2)) + else: + typer.echo(json.dumps(payload, indent=2)) + + if __name__ == "__main__": app() diff --git a/gr2/python_cli/gitops.py b/gr2/python_cli/gitops.py index 8b10d9b..2a31c49 100644 --- a/gr2/python_cli/gitops.py +++ b/gr2/python_cli/gitops.py @@ -40,6 +40,21 @@ def current_head_sha(path: Path) -> str | None: return value or None +def commits_between(path: Path, old_sha: str | None, new_sha: str | None) -> int: + if not new_sha: + return 0 + if not old_sha: + proc = git(path, "rev-list", "--count", new_sha) + else: + proc = git(path, "rev-list", "--count", f"{old_sha}..{new_sha}") + if proc.returncode != 0: + return 0 + try: + return int(proc.stdout.strip() or "0") + except ValueError: + return 0 + + def ensure_repo_cache(url: str, cache_repo_root: Path) -> bool: """Ensure a local bare mirror exists for a repo URL. 
diff --git a/gr2/python_cli/syncops.py b/gr2/python_cli/syncops.py index 664d82a..517ab7d 100644 --- a/gr2/python_cli/syncops.py +++ b/gr2/python_cli/syncops.py @@ -4,6 +4,7 @@ import fcntl import json import os +import time from pathlib import Path from datetime import UTC, datetime @@ -11,6 +12,7 @@ from .gitops import ( clone_repo, + commits_between, current_branch, current_head_sha, discard_if_dirty, @@ -39,6 +41,7 @@ "it does not attempt automatic cross-repo rollback" ) VALID_DIRTY_MODES = {"stash", "block", "discard"} +SYNC_STRATEGY = "reference-cache" @dataclasses.dataclass(frozen=True) @@ -254,6 +257,14 @@ def _emit_sync_event(workspace_root: Path, payload: dict[str, object]) -> None: _append_outbox_event(workspace_root, payload) +def _plan_repo_names(plan: SyncPlan) -> list[str]: + repo_names: list[str] = [] + for op in plan.operations: + if op.scope in {"shared_repo", "lane"}: + repo_names.append(op.subject.split(":")[-1]) + return sorted(dict.fromkeys(repo_names)) + + def build_sync_plan(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncPlan: workspace_root = workspace_root.resolve() dirty_mode = _normalize_dirty_mode(dirty_mode) @@ -550,6 +561,15 @@ def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOp repo_spec = _find_repo(spec, op.subject) cache_path = repo_cache_path(workspace_root, str(repo_spec["name"])) created = ensure_repo_cache(str(repo_spec["url"]), cache_path) + _emit_sync_event( + workspace_root, + { + "type": "sync.cache_seeded" if created else "sync.cache_refreshed", + "repo": op.subject, + "strategy": SYNC_STRATEGY, + "cache_path": str(cache_path), + }, + ) if op.kind == "seed_repo_cache": return f"seeded repo cache for '{op.subject}' at {cache_path}" if created: @@ -571,6 +591,8 @@ def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOp "scope": "shared_repo", "old_sha": before_sha, "new_sha": after_sha, + "strategy": SYNC_STRATEGY, + "commits_pulled": 
commits_between(repo_root, before_sha, after_sha), }, ) return f"cloned shared repo '{op.subject}' into {repo_root}" @@ -607,6 +629,8 @@ def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOp "old_sha": before_sha, "new_sha": after_sha, "branch": expected_branch, + "strategy": SYNC_STRATEGY, + "commits_pulled": commits_between(target_repo_root, before_sha, after_sha), }, ) return f"materialized lane repo '{op.subject}' at {target_repo_root}" @@ -677,6 +701,7 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: workspace_root = workspace_root.resolve() dirty_mode = _normalize_dirty_mode(dirty_mode) operation_id = _operation_id() + started_at = time.monotonic() lock_fh = _acquire_sync_lock(workspace_root) if lock_fh is None: blocked_issue = SyncIssue( @@ -718,6 +743,8 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: "operation_id": operation_id, "workspace_root": str(workspace_root), "dirty_mode": dirty_mode, + "repos": _plan_repo_names(build_sync_plan(workspace_root, dirty_mode=dirty_mode)), + "strategy": SYNC_STRATEGY, }, ) plan = build_sync_plan(workspace_root, dirty_mode=dirty_mode) @@ -744,6 +771,10 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: "workspace_root": str(workspace_root), "status": "blocked", "blocked_codes": [item.code for item in blocked], + "repos_updated": 0, + "repos_skipped": 0, + "repos_failed": len(blocked), + "duration_ms": int((time.monotonic() - started_at) * 1000), }, ) _release_sync_lock(lock_fh) @@ -789,6 +820,10 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: "status": status, "applied_count": len(applied), "failure_codes": [item.code for item in failures], + "repos_updated": sum(1 for op in plan.operations if op.kind in {"clone_shared_repo", "materialize_lane_repo"}), + "repos_skipped": sum(1 for op in plan.operations if op.kind in {"stash_dirty_repo", "discard_dirty_repo"}), 
+ "repos_failed": len(failures), + "duration_ms": int((time.monotonic() - started_at) * 1000), }, ) diff --git a/gr2/tests/test_sprint21_sync_platform.py b/gr2/tests/test_sprint21_sync_platform.py new file mode 100644 index 0000000..bd71eba --- /dev/null +++ b/gr2/tests/test_sprint21_sync_platform.py @@ -0,0 +1,218 @@ +from __future__ import annotations + +import json +import subprocess +import sys +import textwrap +from pathlib import Path +from types import SimpleNamespace + +from typer.testing import CliRunner + +sys.path.insert(0, str(Path(__file__).resolve().parents[2])) + +from gr2.python_cli.app import app +from gr2.python_cli import app as app_module +from gr2.python_cli.platform import CreatePRRequest, PRCheck, PRRef, PRStatus +from gr2.python_cli.syncops import run_sync +from gr2.prototypes import lane_workspace_prototype as lane_proto + + +runner = CliRunner() + + +def _git(cwd: Path, *args: str) -> subprocess.CompletedProcess[str]: + return subprocess.run( + ["git", *args], + cwd=cwd, + capture_output=True, + text=True, + check=False, + ) + + +def _init_bare_remote(tmp_path: Path, name: str) -> tuple[Path, str]: + source = tmp_path / f"{name}-src" + source.mkdir(parents=True, exist_ok=True) + assert _git(source, "init", "-b", "main").returncode == 0 + assert _git(source, "config", "user.name", "Atlas").returncode == 0 + assert _git(source, "config", "user.email", "atlas@example.com").returncode == 0 + (source / "README.md").write_text(f"# {name}\n") + assert _git(source, "add", "README.md").returncode == 0 + assert _git(source, "commit", "-m", "initial").returncode == 0 + + remote = tmp_path / f"{name}.git" + assert subprocess.run( + ["git", "clone", "--bare", str(source), str(remote)], + capture_output=True, + text=True, + check=False, + ).returncode == 0 + return remote, remote.as_uri() + + +def _write_workspace_spec(workspace_root: Path, repo_name: str, repo_url: str) -> None: + spec_path = workspace_root / ".grip" / "workspace_spec.toml" + 
spec_path.parent.mkdir(parents=True, exist_ok=True) + spec_path.write_text( + textwrap.dedent( + f""" + workspace_name = "{workspace_root.name}" + + [[repos]] + name = "{repo_name}" + path = "repos/{repo_name}" + url = "{repo_url}" + + [[units]] + name = "atlas" + path = "agents/atlas/home" + repos = ["{repo_name}"] + """ + ).strip() + + "\n" + ) + + +def _read_outbox(workspace_root: Path) -> list[dict[str, object]]: + outbox = workspace_root / ".grip" / "events" / "outbox.jsonl" + rows: list[dict[str, object]] = [] + if not outbox.exists(): + return rows + for line in outbox.read_text().splitlines(): + if not line.strip(): + continue + rows.append(json.loads(line)) + return rows + + +def test_sync_run_emits_contract_payloads_and_cache_events(tmp_path: Path) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, repo_url = _init_bare_remote(tmp_path, "app") + _write_workspace_spec(workspace_root, "app", repo_url) + + result = run_sync(workspace_root) + assert result.status == "success" + + outbox = _read_outbox(workspace_root) + event_types = [str(row["type"]) for row in outbox] + assert "sync.started" in event_types + assert "sync.repo_updated" in event_types + assert "sync.completed" in event_types + assert "sync.cache_seeded" in event_types + + started = next(row for row in outbox if row["type"] == "sync.started") + assert started["repos"] == ["app"] + assert isinstance(started["strategy"], str) + + updated = next(row for row in outbox if row["type"] == "sync.repo_updated") + assert updated["repo"] == "app" + assert isinstance(updated["commits_pulled"], int) + assert updated["commits_pulled"] >= 0 + + completed = next(row for row in outbox if row["type"] == "sync.completed") + assert completed["status"] == "success" + assert completed["repos_updated"] == 1 + assert completed["repos_skipped"] == 0 + assert completed["repos_failed"] == 0 + assert isinstance(completed["duration_ms"], int) + assert completed["duration_ms"] >= 0 + + +def 
test_sync_run_emits_cache_refresh_event_when_cache_exists(tmp_path: Path) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, repo_url = _init_bare_remote(tmp_path, "app") + _write_workspace_spec(workspace_root, "app", repo_url) + + first = run_sync(workspace_root) + assert first.status == "success" + before_count = len(_read_outbox(workspace_root)) + + second = run_sync(workspace_root) + assert second.status == "success" + + outbox = _read_outbox(workspace_root)[before_count:] + event_types = [str(row["type"]) for row in outbox] + assert "sync.cache_refreshed" in event_types + + +def test_pr_command_group_exists_in_python_cli() -> None: + result = runner.invoke(app, ["pr", "--help"]) + assert result.exit_code == 0 + assert "create" in result.stdout + assert "status" in result.stdout + assert "merge" in result.stdout + assert "checks" in result.stdout + + +def test_pr_commands_route_through_platform_adapter(tmp_path: Path, monkeypatch) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, repo_url = _init_bare_remote(tmp_path, "app") + _write_workspace_spec(workspace_root, "app", repo_url) + run_sync(workspace_root) + + ns = SimpleNamespace( + workspace_root=workspace_root, + owner_unit="atlas", + lane_name="feat-auth", + type="feature", + repos="app", + branch="feat/auth", + default_commands=[], + source="pytest", + ) + lane_proto.create_lane(ns) + + calls: list[tuple[str, object]] = [] + + class FakeAdapter: + name = "fake" + + def create_pr(self, request: CreatePRRequest) -> PRRef: + calls.append(("create", request)) + return PRRef( + repo=request.repo, + number=42, + url="https://example.test/pr/42", + head_branch=request.head_branch, + base_branch=request.base_branch, + title=request.title, + ) + + def merge_pr(self, repo: str, number: int) -> PRRef: + calls.append(("merge", (repo, number))) + return PRRef(repo=repo, number=number, url="https://example.test/pr/42") + + def pr_status(self, repo: str, 
number: int) -> PRStatus: + calls.append(("status", (repo, number))) + ref = PRRef(repo=repo, number=number, url="https://example.test/pr/42") + return PRStatus(ref=ref, state="OPEN", mergeable="MERGEABLE", checks=[PRCheck(name="ci", status="COMPLETED", conclusion="SUCCESS")]) + + def list_prs(self, repo: str, *, head_branch: str | None = None) -> list[PRRef]: + calls.append(("list", (repo, head_branch))) + return [PRRef(repo=repo, number=42, url="https://example.test/pr/42", head_branch=head_branch, base_branch="main", title="feat/auth")] + + def pr_checks(self, repo: str, number: int) -> list[PRCheck]: + calls.append(("checks", (repo, number))) + return [PRCheck(name="ci", status="COMPLETED", conclusion="SUCCESS")] + + monkeypatch.setattr(app_module, "get_platform_adapter", lambda name="github": FakeAdapter()) + + result = runner.invoke(app, ["pr", "create", str(workspace_root), "atlas", "feat-auth", "--json"]) + assert result.exit_code == 0 + assert any(kind == "create" for kind, _ in calls) + + result = runner.invoke(app, ["pr", "status", str(workspace_root), "atlas", "feat-auth", "--json"]) + assert result.exit_code == 0 + assert any(kind == "status" for kind, _ in calls) + + result = runner.invoke(app, ["pr", "checks", str(workspace_root), "atlas", "feat-auth", "--json"]) + assert result.exit_code == 0 + assert any(kind == "checks" for kind, _ in calls) + + result = runner.invoke(app, ["pr", "merge", str(workspace_root), "atlas", "feat-auth", "--json"]) + assert result.exit_code == 0 + assert any(kind == "merge" for kind, _ in calls) From fd36218168bb175262905f4b8f201604aaceaf1b Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 12:26:47 -0500 Subject: [PATCH 09/18] test: add sync lock and dirty mode coverage --- gr2/tests/test_sprint21_sync_platform.py | 104 +++++++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/gr2/tests/test_sprint21_sync_platform.py b/gr2/tests/test_sprint21_sync_platform.py index bd71eba..c469e97 100644 --- 
a/gr2/tests/test_sprint21_sync_platform.py +++ b/gr2/tests/test_sprint21_sync_platform.py @@ -1,5 +1,6 @@ from __future__ import annotations +import fcntl import json import subprocess import sys @@ -86,6 +87,12 @@ def _read_outbox(workspace_root: Path) -> list[dict[str, object]]: return rows +def _stash_list(repo_root: Path) -> list[str]: + proc = _git(repo_root, "stash", "list") + assert proc.returncode == 0 + return [line for line in proc.stdout.splitlines() if line.strip()] + + def test_sync_run_emits_contract_payloads_and_cache_events(tmp_path: Path) -> None: workspace_root = tmp_path / "workspace" workspace_root.mkdir() @@ -216,3 +223,100 @@ def pr_checks(self, repo: str, number: int) -> list[PRCheck]: result = runner.invoke(app, ["pr", "merge", str(workspace_root), "atlas", "feat-auth", "--json"]) assert result.exit_code == 0 assert any(kind == "merge" for kind, _ in calls) + + +def test_sync_run_reports_terminal_blocked_event_on_lock_contention(tmp_path: Path) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, repo_url = _init_bare_remote(tmp_path, "app") + _write_workspace_spec(workspace_root, "app", repo_url) + run_sync(workspace_root) + + lock_path = workspace_root / ".grip" / "state" / "sync.lock" + lock_path.parent.mkdir(parents=True, exist_ok=True) + with lock_path.open("a+", encoding="utf-8") as lock_fh: + fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB) + result = runner.invoke(app, ["sync", "run", str(workspace_root), "--json"]) + + assert result.exit_code == 1 + payload = json.loads(result.stdout) + assert payload["status"] == "blocked" + assert any(item["code"] == "sync_lock_held" for item in payload["blocked"]) + + outbox = _read_outbox(workspace_root) + assert any(row["type"] == "sync.conflict" for row in outbox) + terminal = [row for row in outbox if row["type"] == "sync.completed" and row.get("status") == "blocked"] + assert terminal, "lock contention must still emit terminal sync.completed 
status=blocked" + + +def test_sync_run_dirty_block_reports_blocked_without_mutation(tmp_path: Path) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, repo_url = _init_bare_remote(tmp_path, "app") + _write_workspace_spec(workspace_root, "app", repo_url) + run_sync(workspace_root) + + repo_root = workspace_root / "repos" / "app" + (repo_root / "README.md").write_text("dirty block\n") + + result = runner.invoke(app, ["sync", "run", str(workspace_root), "--dirty", "block", "--json"]) + assert result.exit_code == 1 + payload = json.loads(result.stdout) + assert payload["status"] == "blocked" + assert payload["dirty_mode"] == "block" + assert "app" in payload["dirty_targets"] + assert any(item["code"] == "dirty_shared_repo" for item in payload["blocked"]) + assert repo_root.joinpath("README.md").read_text() == "dirty block\n" + assert _stash_list(repo_root) == [] + + +def test_sync_run_dirty_stash_stashes_changes_and_continues(tmp_path: Path) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, repo_url = _init_bare_remote(tmp_path, "app") + _write_workspace_spec(workspace_root, "app", repo_url) + run_sync(workspace_root) + + repo_root = workspace_root / "repos" / "app" + (repo_root / "README.md").write_text("dirty stash\n") + + result = runner.invoke(app, ["sync", "run", str(workspace_root), "--dirty", "stash", "--json"]) + assert result.exit_code == 0 + payload = json.loads(result.stdout) + assert payload["status"] == "success" + assert payload["dirty_mode"] == "stash" + assert "app" in payload["dirty_targets"] + assert _git(repo_root, "status", "--porcelain").stdout.strip() == "" + assert _stash_list(repo_root), "stash mode should leave a git stash entry" + + outbox = _read_outbox(workspace_root) + assert any( + row["type"] == "sync.repo_skipped" and row.get("repo") == "app" and row.get("reason") == "dirty_stashed" + for row in outbox + ) + + +def 
test_sync_run_dirty_discard_discards_changes_without_stash(tmp_path: Path) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, repo_url = _init_bare_remote(tmp_path, "app") + _write_workspace_spec(workspace_root, "app", repo_url) + run_sync(workspace_root) + + repo_root = workspace_root / "repos" / "app" + (repo_root / "README.md").write_text("dirty discard\n") + + result = runner.invoke(app, ["sync", "run", str(workspace_root), "--dirty", "discard", "--json"]) + assert result.exit_code == 0 + payload = json.loads(result.stdout) + assert payload["status"] == "success" + assert payload["dirty_mode"] == "discard" + assert "app" in payload["dirty_targets"] + assert repo_root.joinpath("README.md").read_text() == "# app\n" + assert _stash_list(repo_root) == [] + + outbox = _read_outbox(workspace_root) + assert any( + row["type"] == "sync.repo_skipped" and row.get("repo") == "app" and row.get("reason") == "dirty_discarded" + for row in outbox + ) From 3d0f2ee73f7bf24d54faf9084e59d48fdefd84b5 Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 12:27:12 -0500 Subject: [PATCH 10/18] fix: emit terminal event on sync lock contention --- gr2/python_cli/syncops.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/gr2/python_cli/syncops.py b/gr2/python_cli/syncops.py index 517ab7d..98a2aab 100644 --- a/gr2/python_cli/syncops.py +++ b/gr2/python_cli/syncops.py @@ -723,6 +723,20 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: "workspace_root": str(workspace_root), }, ) + _emit_sync_event( + workspace_root, + { + "type": "sync.completed", + "operation_id": operation_id, + "workspace_root": str(workspace_root), + "status": "blocked", + "blocked_codes": [blocked_issue.code], + "repos_updated": 0, + "repos_skipped": 0, + "repos_failed": 1, + "duration_ms": int((time.monotonic() - started_at) * 1000), + }, + ) return SyncResult( workspace_root=str(workspace_root), status="blocked", From 
fd01b782938b8a225673cf830c6971c23ddf5bee Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 12:33:39 -0500 Subject: [PATCH 11/18] test: add pr group lifecycle coverage --- gr2/tests/test_sprint21_sync_platform.py | 207 +++++++++++++++++++++++ 1 file changed, 207 insertions(+) diff --git a/gr2/tests/test_sprint21_sync_platform.py b/gr2/tests/test_sprint21_sync_platform.py index c469e97..092c19e 100644 --- a/gr2/tests/test_sprint21_sync_platform.py +++ b/gr2/tests/test_sprint21_sync_platform.py @@ -75,6 +75,38 @@ def _write_workspace_spec(workspace_root: Path, repo_name: str, repo_url: str) - ) +def _write_workspace_spec_multi(workspace_root: Path, repos: list[tuple[str, str]]) -> None: + spec_path = workspace_root / ".grip" / "workspace_spec.toml" + spec_path.parent.mkdir(parents=True, exist_ok=True) + repo_blocks = [] + for repo_name, repo_url in repos: + repo_blocks.append( + textwrap.dedent( + f""" + [[repos]] + name = "{repo_name}" + path = "repos/{repo_name}" + url = "{repo_url}" + """ + ).strip() + ) + spec_path.write_text( + textwrap.dedent( + f""" + workspace_name = "{workspace_root.name}" + + {'\n\n'.join(repo_blocks)} + + [[units]] + name = "atlas" + path = "agents/atlas/home" + repos = [{", ".join(f'"{name}"' for name, _ in repos)}] + """ + ).strip() + + "\n" + ) + + def _read_outbox(workspace_root: Path) -> list[dict[str, object]]: outbox = workspace_root / ".grip" / "events" / "outbox.jsonl" rows: list[dict[str, object]] = [] @@ -225,6 +257,181 @@ def pr_checks(self, repo: str, number: int) -> list[PRCheck]: assert any(kind == "merge" for kind, _ in calls) +def test_pr_create_persists_group_state_by_pr_group_id(tmp_path: Path, monkeypatch) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, app_url = _init_bare_remote(tmp_path, "app") + _, api_url = _init_bare_remote(tmp_path, "api") + _write_workspace_spec_multi(workspace_root, [("app", app_url), ("api", api_url)]) + run_sync(workspace_root) + + ns = 
SimpleNamespace( + workspace_root=workspace_root, + owner_unit="atlas", + lane_name="feat-router", + type="feature", + repos="app,api", + branch="feat/router", + default_commands=[], + source="pytest", + ) + lane_proto.create_lane(ns) + + class FakeAdapter: + name = "fake" + + def create_pr(self, request: CreatePRRequest) -> PRRef: + number = 41 if request.repo == "app" else 42 + return PRRef( + repo=request.repo, + number=number, + url=f"https://example.test/{request.repo}/pull/{number}", + head_branch=request.head_branch, + base_branch=request.base_branch, + title=request.title, + ) + + def merge_pr(self, repo: str, number: int) -> PRRef: # pragma: no cover - not used here + raise AssertionError("merge_pr should not be called") + + def pr_status(self, repo: str, number: int) -> PRStatus: # pragma: no cover - not used here + raise AssertionError("pr_status should not be called") + + def list_prs(self, repo: str, *, head_branch: str | None = None) -> list[PRRef]: # pragma: no cover + return [] + + def pr_checks(self, repo: str, number: int) -> list[PRCheck]: # pragma: no cover + return [] + + monkeypatch.setattr(app_module, "get_platform_adapter", lambda name="github": FakeAdapter()) + + result = runner.invoke(app, ["pr", "create", str(workspace_root), "atlas", "feat-router", "--json"]) + assert result.exit_code == 0 + payload = json.loads(result.stdout) + assert payload["pr_group_id"].startswith("pg_") + assert len(payload["refs"]) == 2 + + group_path = workspace_root / ".grip" / "pr_groups" / f'{payload["pr_group_id"]}.json' + assert group_path.exists(), "group state should be stored by pr_group_id, not lane name" + stored = json.loads(group_path.read_text()) + assert {item["repo"]: item["number"] for item in stored["refs"]} == {"app": 41, "api": 42} + + +def test_pr_status_aggregates_group_state(tmp_path: Path, monkeypatch) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, app_url = _init_bare_remote(tmp_path, "app") + _, api_url = 
_init_bare_remote(tmp_path, "api") + _write_workspace_spec_multi(workspace_root, [("app", app_url), ("api", api_url)]) + run_sync(workspace_root) + + pr_group_id = "pg_deadbeef" + group_path = workspace_root / ".grip" / "pr_groups" / f"{pr_group_id}.json" + group_path.parent.mkdir(parents=True, exist_ok=True) + group_path.write_text( + json.dumps( + { + "pr_group_id": pr_group_id, + "owner_unit": "atlas", + "lane_name": "feat-router", + "platform": "github", + "refs": [ + {"repo": "app", "number": 41, "url": "https://example.test/app/41"}, + {"repo": "api", "number": 42, "url": "https://example.test/api/42"}, + ], + } + ) + ) + + class FakeAdapter: + name = "fake" + + def create_pr(self, request: CreatePRRequest) -> PRRef: # pragma: no cover + raise AssertionError("create_pr should not be called") + + def merge_pr(self, repo: str, number: int) -> PRRef: # pragma: no cover + raise AssertionError("merge_pr should not be called") + + def pr_status(self, repo: str, number: int) -> PRStatus: + state = "OPEN" if repo == "app" else "MERGED" + ref = PRRef(repo=repo, number=number, url=f"https://example.test/{repo}/{number}") + return PRStatus(ref=ref, state=state, mergeable="MERGEABLE", checks=[]) + + def list_prs(self, repo: str, *, head_branch: str | None = None) -> list[PRRef]: # pragma: no cover + return [] + + def pr_checks(self, repo: str, number: int) -> list[PRCheck]: # pragma: no cover + return [] + + monkeypatch.setattr(app_module, "get_platform_adapter", lambda name="github": FakeAdapter()) + + result = runner.invoke(app, ["pr", "status", str(workspace_root), "atlas", "feat-router", "--json"]) + assert result.exit_code == 0 + payload = json.loads(result.stdout) + assert payload["pr_group_id"] == pr_group_id + assert payload["group_state"] == "partially_merged" + + +def test_pr_merge_reports_partial_failure_and_preserves_state(tmp_path: Path, monkeypatch) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, app_url = 
_init_bare_remote(tmp_path, "app") + _, api_url = _init_bare_remote(tmp_path, "api") + _write_workspace_spec_multi(workspace_root, [("app", app_url), ("api", api_url)]) + run_sync(workspace_root) + + pr_group_id = "pg_badmerge" + group_path = workspace_root / ".grip" / "pr_groups" / f"{pr_group_id}.json" + group_path.parent.mkdir(parents=True, exist_ok=True) + group_path.write_text( + json.dumps( + { + "pr_group_id": pr_group_id, + "owner_unit": "atlas", + "lane_name": "feat-router", + "platform": "github", + "refs": [ + {"repo": "app", "number": 41, "url": "https://example.test/app/41"}, + {"repo": "api", "number": 42, "url": "https://example.test/api/42"}, + ], + } + ) + ) + + class FakeAdapter: + name = "fake" + + def create_pr(self, request: CreatePRRequest) -> PRRef: # pragma: no cover + raise AssertionError("create_pr should not be called") + + def merge_pr(self, repo: str, number: int) -> PRRef: + if repo == "api": + raise RuntimeError("merge conflict") + return PRRef(repo=repo, number=number, url=f"https://example.test/{repo}/{number}") + + def pr_status(self, repo: str, number: int) -> PRStatus: # pragma: no cover + raise AssertionError("pr_status should not be called") + + def list_prs(self, repo: str, *, head_branch: str | None = None) -> list[PRRef]: # pragma: no cover + return [] + + def pr_checks(self, repo: str, number: int) -> list[PRCheck]: # pragma: no cover + return [] + + monkeypatch.setattr(app_module, "get_platform_adapter", lambda name="github": FakeAdapter()) + + result = runner.invoke(app, ["pr", "merge", str(workspace_root), "atlas", "feat-router", "--json"]) + assert result.exit_code == 1 + payload = json.loads(result.stdout) + assert payload["status"] == "partial_failure" + assert payload["pr_group_id"] == pr_group_id + assert payload["merged"] == ["app"] + assert payload["failed"][0]["repo"] == "api" + + stored = json.loads(group_path.read_text()) + assert stored["group_state"] == "partially_merged" + + def 
test_sync_run_reports_terminal_blocked_event_on_lock_contention(tmp_path: Path) -> None: workspace_root = tmp_path / "workspace" workspace_root.mkdir() From c92145a98aeafdabb8378d5d5fd5ee16ddd90ad9 Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 12:35:04 -0500 Subject: [PATCH 12/18] feat: persist and aggregate pr group state --- gr2/python_cli/app.py | 80 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 65 insertions(+), 15 deletions(-) diff --git a/gr2/python_cli/app.py b/gr2/python_cli/app.py index 663c631..4195644 100644 --- a/gr2/python_cli/app.py +++ b/gr2/python_cli/app.py @@ -211,22 +211,40 @@ def _pr_groups_root(workspace_root: Path) -> Path: return workspace_root / ".grip" / "pr_groups" -def _pr_group_path(workspace_root: Path, owner_unit: str, lane_name: str) -> Path: - return _pr_groups_root(workspace_root) / owner_unit / f"{lane_name}.json" +def _pr_group_path(workspace_root: Path, pr_group_id: str) -> Path: + return _pr_groups_root(workspace_root) / f"{pr_group_id}.json" -def _write_pr_group(workspace_root: Path, owner_unit: str, lane_name: str, payload: dict[str, object]) -> Path: - path = _pr_group_path(workspace_root, owner_unit, lane_name) +def _write_pr_group(workspace_root: Path, payload: dict[str, object]) -> Path: + pr_group_id = str(payload["pr_group_id"]) + path = _pr_group_path(workspace_root, pr_group_id) path.parent.mkdir(parents=True, exist_ok=True) path.write_text(json.dumps(payload, indent=2) + "\n") return path -def _load_pr_group(workspace_root: Path, owner_unit: str, lane_name: str) -> dict[str, object]: - path = _pr_group_path(workspace_root, owner_unit, lane_name) - if not path.exists(): - raise SystemExit(f"pr group not found for {owner_unit}/{lane_name}: {path}") - return json.loads(path.read_text()) +def _find_pr_group(workspace_root: Path, owner_unit: str, lane_name: str) -> tuple[Path, dict[str, object]]: + root = _pr_groups_root(workspace_root) + if not root.exists(): + raise SystemExit(f"pr group not 
found for {owner_unit}/{lane_name}: {root}") + for path in sorted(root.glob("*.json")): + doc = json.loads(path.read_text()) + if doc.get("owner_unit") == owner_unit and doc.get("lane_name") == lane_name: + return path, doc + raise SystemExit(f"pr group not found for {owner_unit}/{lane_name}: {root}") + + +def _group_state_from_statuses(statuses: list[dict[str, object]]) -> str: + states = [str(item.get("state", "")).upper() for item in statuses] + if not states: + return "empty" + if all(state == "MERGED" for state in states): + return "merged" + if any(state == "MERGED" for state in states): + return "partially_merged" + if all(state in {"OPEN", "MERGEABLE", "CLEAN"} for state in states): + return "open" + return "mixed" def _repo_slug_from_url(url: str, fallback_name: str) -> str: @@ -954,8 +972,9 @@ def pr_create( "lane_name": resolved_lane, "platform": platform, "refs": refs, + "group_state": "open", } - path = _write_pr_group(workspace_root, owner_unit, resolved_lane, payload) + path = _write_pr_group(workspace_root, payload) payload["state_path"] = str(path) if json_output: typer.echo(json.dumps(payload, indent=2)) @@ -973,17 +992,22 @@ def pr_status( """Show grouped PR status for a lane.""" workspace_root = workspace_root.resolve() resolved_lane = _resolve_lane_name(workspace_root, owner_unit, lane_name) - group = _load_pr_group(workspace_root, owner_unit, resolved_lane) + group_path, group = _find_pr_group(workspace_root, owner_unit, resolved_lane) adapter = get_platform_adapter(str(group.get("platform", "github"))) statuses = [] for ref_doc in group.get("refs", []): ref = PRRef(**ref_doc) statuses.append(adapter.pr_status(ref.repo, int(ref.number)).as_dict()) + group["statuses"] = statuses + group["group_state"] = _group_state_from_statuses(statuses) + _write_pr_group(workspace_root, group) payload = { "pr_group_id": group["pr_group_id"], "owner_unit": owner_unit, "lane_name": resolved_lane, + "group_state": group["group_state"], "statuses": statuses, + 
"state_path": str(group_path), } if json_output: typer.echo(json.dumps(payload, indent=2)) @@ -1001,7 +1025,7 @@ def pr_checks( """Show grouped PR checks for a lane.""" workspace_root = workspace_root.resolve() resolved_lane = _resolve_lane_name(workspace_root, owner_unit, lane_name) - group = _load_pr_group(workspace_root, owner_unit, resolved_lane) + group_path, group = _find_pr_group(workspace_root, owner_unit, resolved_lane) adapter = get_platform_adapter(str(group.get("platform", "github"))) rows = [] for ref_doc in group.get("refs", []): @@ -1018,6 +1042,7 @@ def pr_checks( "owner_unit": owner_unit, "lane_name": resolved_lane, "checks": rows, + "state_path": str(group_path), } if json_output: typer.echo(json.dumps(payload, indent=2)) @@ -1035,17 +1060,42 @@ def pr_merge( """Merge grouped PRs for a lane.""" workspace_root = workspace_root.resolve() resolved_lane = _resolve_lane_name(workspace_root, owner_unit, lane_name) - group = _load_pr_group(workspace_root, owner_unit, resolved_lane) + group_path, group = _find_pr_group(workspace_root, owner_unit, resolved_lane) adapter = get_platform_adapter(str(group.get("platform", "github"))) - merged = [] + merged: list[str] = [] + failed: list[dict[str, object]] = [] for ref_doc in group.get("refs", []): ref = PRRef(**ref_doc) - merged.append(adapter.merge_pr(ref.repo, int(ref.number)).as_dict()) + try: + adapter.merge_pr(ref.repo, int(ref.number)) + merged.append(ref.repo) + except Exception as exc: + failed.append({"repo": ref.repo, "number": ref.number, "reason": str(exc)}) + break + if failed: + group["group_state"] = "partially_merged" if merged else "merge_failed" + group["merged"] = merged + _write_pr_group(workspace_root, group) + payload = { + "status": "partial_failure" if merged else "failed", + "pr_group_id": group["pr_group_id"], + "owner_unit": owner_unit, + "lane_name": resolved_lane, + "merged": merged, + "failed": failed, + "state_path": str(group_path), + } + if json_output: + 
typer.echo(json.dumps(payload, indent=2)) + else: + typer.echo(json.dumps(payload, indent=2)) + raise typer.Exit(code=1) payload = { "pr_group_id": group["pr_group_id"], "owner_unit": owner_unit, "lane_name": resolved_lane, "merged": merged, + "state_path": str(group_path), } if json_output: typer.echo(json.dumps(payload, indent=2)) From 0628cec4cf0729c8920391d4a55ec2932f573ca9 Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 12:40:24 -0500 Subject: [PATCH 13/18] fix: align core sync events to contract --- gr2/python_cli/syncops.py | 49 +++++++++---------- gr2/tests/test_sprint21_sync_platform.py | 62 ++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 26 deletions(-) diff --git a/gr2/python_cli/syncops.py b/gr2/python_cli/syncops.py index 98a2aab..0b56d00 100644 --- a/gr2/python_cli/syncops.py +++ b/gr2/python_cli/syncops.py @@ -241,6 +241,7 @@ def _append_outbox_event(workspace_root: Path, payload: dict[str, object]) -> No if value >= seq: seq = value + 1 event = { + "version": 1, "seq": seq, "event_id": os.urandom(8).hex(), "timestamp": _now_utc(), @@ -257,6 +258,14 @@ def _emit_sync_event(workspace_root: Path, payload: dict[str, object]) -> None: _append_outbox_event(workspace_root, payload) +def _sync_context(workspace_root: Path, *, actor: str = "system", owner_unit: str = "workspace") -> dict[str, object]: + return { + "workspace": workspace_root.name, + "actor": actor, + "owner_unit": owner_unit, + } + + def _plan_repo_names(plan: SyncPlan) -> list[str]: repo_names: list[str] = [] for op in plan.operations: @@ -565,6 +574,7 @@ def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOp workspace_root, { "type": "sync.cache_seeded" if created else "sync.cache_refreshed", + **_sync_context(workspace_root), "repo": op.subject, "strategy": SYNC_STRATEGY, "cache_path": str(cache_path), @@ -587,8 +597,8 @@ def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOp workspace_root, { "type": 
"sync.repo_updated", + **_sync_context(workspace_root), "repo": op.subject, - "scope": "shared_repo", "old_sha": before_sha, "new_sha": after_sha, "strategy": SYNC_STRATEGY, @@ -622,13 +632,10 @@ def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOp workspace_root, { "type": "sync.repo_updated", + **_sync_context(workspace_root, owner_unit=owner_unit), "repo": repo_name, - "scope": "lane", - "owner_unit": owner_unit, - "lane": lane_name, "old_sha": before_sha, "new_sha": after_sha, - "branch": expected_branch, "strategy": SYNC_STRATEGY, "commits_pulled": commits_between(target_repo_root, before_sha, after_sha), }, @@ -652,8 +659,8 @@ def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOp workspace_root, { "type": "sync.repo_skipped", + **_sync_context(workspace_root), "repo": op.subject.split(":")[-1], - "scope": op.scope, "reason": "dirty_stashed", }, ) @@ -667,8 +674,8 @@ def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOp workspace_root, { "type": "sync.repo_skipped", + **_sync_context(workspace_root), "repo": op.subject.split(":")[-1], - "scope": op.scope, "reason": "dirty_discarded", }, ) @@ -718,19 +725,17 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: workspace_root, { "type": "sync.conflict", - "operation_id": operation_id, + **_sync_context(workspace_root), "reason": "lock_held", - "workspace_root": str(workspace_root), + "repo": workspace_root.name, }, ) _emit_sync_event( workspace_root, { "type": "sync.completed", - "operation_id": operation_id, - "workspace_root": str(workspace_root), + **_sync_context(workspace_root), "status": "blocked", - "blocked_codes": [blocked_issue.code], "repos_updated": 0, "repos_skipped": 0, "repos_failed": 1, @@ -754,9 +759,7 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: workspace_root, { "type": "sync.started", - "operation_id": operation_id, - "workspace_root": 
str(workspace_root), - "dirty_mode": dirty_mode, + **_sync_context(workspace_root), "repos": _plan_repo_names(build_sync_plan(workspace_root, dirty_mode=dirty_mode)), "strategy": SYNC_STRATEGY, }, @@ -770,21 +773,18 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: workspace_root, { "type": "sync.conflict", - "operation_id": operation_id, - "workspace_root": str(workspace_root), + **_sync_context(workspace_root, owner_unit=issue.subject.split("/", 1)[0]), "reason": "active_lease", - "subject": issue.subject, - "leases": issue.details.get("leases", []), + "repo": issue.subject, + "conflicting_files": [], }, ) _emit_sync_event( workspace_root, { "type": "sync.completed", - "operation_id": operation_id, - "workspace_root": str(workspace_root), + **_sync_context(workspace_root), "status": "blocked", - "blocked_codes": [item.code for item in blocked], "repos_updated": 0, "repos_skipped": 0, "repos_failed": len(blocked), @@ -829,11 +829,8 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: workspace_root, { "type": "sync.completed", - "operation_id": operation_id, - "workspace_root": str(workspace_root), + **_sync_context(workspace_root), "status": status, - "applied_count": len(applied), - "failure_codes": [item.code for item in failures], "repos_updated": sum(1 for op in plan.operations if op.kind in {"clone_shared_repo", "materialize_lane_repo"}), "repos_skipped": sum(1 for op in plan.operations if op.kind in {"stash_dirty_repo", "discard_dirty_repo"}), "repos_failed": len(failures), diff --git a/gr2/tests/test_sprint21_sync_platform.py b/gr2/tests/test_sprint21_sync_platform.py index 092c19e..187b9e2 100644 --- a/gr2/tests/test_sprint21_sync_platform.py +++ b/gr2/tests/test_sprint21_sync_platform.py @@ -159,6 +159,68 @@ def test_sync_run_emits_contract_payloads_and_cache_events(tmp_path: Path) -> No assert completed["duration_ms"] >= 0 +def 
test_sync_core_events_match_hook_event_contract_field_names(tmp_path: Path) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, repo_url = _init_bare_remote(tmp_path, "app") + _write_workspace_spec(workspace_root, "app", repo_url) + + result = run_sync(workspace_root) + assert result.status == "success" + + outbox = _read_outbox(workspace_root) + started = next(row for row in outbox if row["type"] == "sync.started") + updated = next(row for row in outbox if row["type"] == "sync.repo_updated") + completed = next(row for row in outbox if row["type"] == "sync.completed") + + started_expected = { + "version", + "event_id", + "seq", + "timestamp", + "type", + "workspace", + "actor", + "owner_unit", + "repos", + "strategy", + } + updated_expected = { + "version", + "event_id", + "seq", + "timestamp", + "type", + "workspace", + "actor", + "owner_unit", + "repo", + "old_sha", + "new_sha", + "strategy", + "commits_pulled", + } + completed_expected = { + "version", + "event_id", + "seq", + "timestamp", + "type", + "workspace", + "actor", + "owner_unit", + "status", + "repos_updated", + "repos_skipped", + "repos_failed", + "duration_ms", + } + + assert set(started.keys()) == started_expected + assert set(updated.keys()) == updated_expected + assert set(completed.keys()) == completed_expected + + def test_sync_run_emits_cache_refresh_event_when_cache_exists(tmp_path: Path) -> None: workspace_root = tmp_path / "workspace" workspace_root.mkdir() From 5fdb8eb69d81aebb5c0752684c10c220238a2d32 Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 12:46:29 -0500 Subject: [PATCH 14/18] test: add workspace materialize event coverage --- gr2/tests/test_sprint21_sync_platform.py | 76 ++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/gr2/tests/test_sprint21_sync_platform.py b/gr2/tests/test_sprint21_sync_platform.py index 187b9e2..4e8bf0b 100644 --- a/gr2/tests/test_sprint21_sync_platform.py +++ 
b/gr2/tests/test_sprint21_sync_platform.py @@ -52,6 +52,42 @@ def _init_bare_remote(tmp_path: Path, name: str) -> tuple[Path, str]: return remote, remote.as_uri() +def _init_bare_remote_with_projection_hook(tmp_path: Path, name: str) -> tuple[Path, str]: + source = tmp_path / f"{name}-src" + source.mkdir(parents=True, exist_ok=True) + assert _git(source, "init", "-b", "main").returncode == 0 + assert _git(source, "config", "user.name", "Atlas").returncode == 0 + assert _git(source, "config", "user.email", "atlas@example.com").returncode == 0 + (source / "README.md").write_text(f"# {name}\n") + (source / "shared").mkdir(parents=True, exist_ok=True) + (source / "shared" / "CLAUDE.shared.md").write_text("shared claude\n") + (source / ".gr2").mkdir(parents=True, exist_ok=True) + (source / ".gr2" / "hooks.toml").write_text( + textwrap.dedent( + """ + [repo] + name = "app" + + [[files.copy]] + src = "shared/CLAUDE.shared.md" + dest = "{repo_root}/CLAUDE.md" + """ + ).strip() + + "\n" + ) + assert _git(source, "add", "README.md", "shared/CLAUDE.shared.md", ".gr2/hooks.toml").returncode == 0 + assert _git(source, "commit", "-m", "initial").returncode == 0 + + remote = tmp_path / f"{name}.git" + assert subprocess.run( + ["git", "clone", "--bare", str(source), str(remote)], + capture_output=True, + text=True, + check=False, + ).returncode == 0 + return remote, remote.as_uri() + + def _write_workspace_spec(workspace_root: Path, repo_name: str, repo_url: str) -> None: spec_path = workspace_root / ".grip" / "workspace_spec.toml" spec_path.parent.mkdir(parents=True, exist_ok=True) @@ -239,6 +275,46 @@ def test_sync_run_emits_cache_refresh_event_when_cache_exists(tmp_path: Path) -> assert "sync.cache_refreshed" in event_types +def test_workspace_materialize_emits_workspace_materialized_event(tmp_path: Path) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, repo_url = _init_bare_remote(tmp_path, "app") + _write_workspace_spec(workspace_root, "app", 
repo_url) + + result = runner.invoke(app, ["workspace", "materialize", str(workspace_root), "--json", "--yes"]) + assert result.exit_code == 0 + + outbox = _read_outbox(workspace_root) + materialized = next(row for row in outbox if row["type"] == "workspace.materialized") + assert materialized["workspace"] == workspace_root.name + assert materialized["actor"] == "system" + assert materialized["owner_unit"] == "workspace" + assert materialized["repos"] == [{"repo": "app", "first_materialize": True}] + + +def test_workspace_materialize_emits_file_projected_event(tmp_path: Path) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, repo_url = _init_bare_remote_with_projection_hook(tmp_path, "app") + _write_workspace_spec(workspace_root, "app", repo_url) + + result = runner.invoke(app, ["workspace", "materialize", str(workspace_root), "--json", "--yes"]) + assert result.exit_code == 0 + + projected_path = workspace_root / "repos" / "app" / "CLAUDE.md" + assert projected_path.exists() + + outbox = _read_outbox(workspace_root) + projected = next(row for row in outbox if row["type"] == "workspace.file_projected") + assert projected["workspace"] == workspace_root.name + assert projected["actor"] == "system" + assert projected["owner_unit"] == "workspace" + assert projected["repo"] == "app" + assert projected["kind"] == "copy" + assert projected["src"] == "repos/app/shared/CLAUDE.shared.md" + assert projected["dest"] == "repos/app/CLAUDE.md" + + def test_pr_command_group_exists_in_python_cli() -> None: result = runner.invoke(app, ["pr", "--help"]) assert result.exit_code == 0 From fd993b2f1c464b1fbf3d3e1bda3d567e7612243b Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 12:48:11 -0500 Subject: [PATCH 15/18] feat: emit workspace materialize events --- gr2/python_cli/events.py | 261 ++++++----------------------------- gr2/python_cli/hooks.py | 6 + gr2/python_cli/spec_apply.py | 53 ++++++- gr2/python_cli/syncops.py | 51 +------ 4 files 
changed, 97 insertions(+), 274 deletions(-) diff --git a/gr2/python_cli/events.py b/gr2/python_cli/events.py index 2758e0d..6ba47b8 100644 --- a/gr2/python_cli/events.py +++ b/gr2/python_cli/events.py @@ -1,240 +1,59 @@ -"""gr2 event system runtime. - -Implements the event contract from HOOK-EVENT-CONTRACT.md sections 3-8: -- EventType enum (section 7.2) -- emit() function (sections 4.2, 7.1) -- Outbox management with rotation (sections 4.1-4.4) -- Cursor-based consumer model (section 5.1) -""" from __future__ import annotations import fcntl import json import os -import sys -from datetime import datetime, timezone -from enum import Enum +from datetime import UTC, datetime from pathlib import Path -# Reserved field names that payload keys must not collide with (section 3.1). -_RESERVED_NAMES = frozenset({ - "version", "event_id", "seq", "timestamp", "type", - "workspace", "actor", "agent_id", "owner_unit", -}) - -_ROTATION_THRESHOLD = 10 * 1024 * 1024 # 10 MB - - -class EventType(str, Enum): - # Lane lifecycle - LANE_CREATED = "lane.created" - LANE_ENTERED = "lane.entered" - LANE_EXITED = "lane.exited" - LANE_SWITCHED = "lane.switched" - LANE_ARCHIVED = "lane.archived" - - # Lease lifecycle - LEASE_ACQUIRED = "lease.acquired" - LEASE_RELEASED = "lease.released" - LEASE_EXPIRED = "lease.expired" - LEASE_FORCE_BROKEN = "lease.force_broken" +def _now_utc() -> str: + return datetime.now(UTC).isoformat() - # Hook execution - HOOK_STARTED = "hook.started" - HOOK_COMPLETED = "hook.completed" - HOOK_FAILED = "hook.failed" - HOOK_SKIPPED = "hook.skipped" - # PR lifecycle - PR_CREATED = "pr.created" - PR_STATUS_CHANGED = "pr.status_changed" - PR_CHECKS_PASSED = "pr.checks_passed" - PR_CHECKS_FAILED = "pr.checks_failed" - PR_REVIEW_SUBMITTED = "pr.review_submitted" - PR_MERGED = "pr.merged" - PR_MERGE_FAILED = "pr.merge_failed" +def _events_dir(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "events" - # Sync operations - SYNC_STARTED = "sync.started" - 
SYNC_REPO_UPDATED = "sync.repo_updated" - SYNC_REPO_SKIPPED = "sync.repo_skipped" - SYNC_CONFLICT = "sync.conflict" - SYNC_COMPLETED = "sync.completed" - SYNC_CACHE_SEEDED = "sync.cache_seeded" - SYNC_CACHE_REFRESHED = "sync.cache_refreshed" - # Recovery - FAILURE_RESOLVED = "failure.resolved" - LEASE_RECLAIMED = "lease.reclaimed" +def _outbox_file(workspace_root: Path) -> Path: + return _events_dir(workspace_root) / "outbox.jsonl" - # Workspace operations - WORKSPACE_MATERIALIZED = "workspace.materialized" - WORKSPACE_FILE_PROJECTED = "workspace.file_projected" +def _outbox_lock_file(workspace_root: Path) -> Path: + return _events_dir(workspace_root) / "outbox.lock" -def _outbox_path(workspace_root: Path) -> Path: - return workspace_root / ".grip" / "events" / "outbox.jsonl" - -def _cursors_dir(workspace_root: Path) -> Path: - return workspace_root / ".grip" / "events" / "cursors" - - -def _current_seq(outbox: Path) -> int: - """Return the highest seq in the outbox, or 0 if empty/missing.""" - if not outbox.exists(): - return 0 +def append_outbox_event(workspace_root: Path, payload: dict[str, object]) -> None: + outbox_path = _outbox_file(workspace_root) + lock_path = _outbox_lock_file(workspace_root) + outbox_path.parent.mkdir(parents=True, exist_ok=True) + lock_path.parent.mkdir(parents=True, exist_ok=True) try: - text = outbox.read_text() - except OSError: - return 0 - last_seq = 0 - for line in text.strip().split("\n"): - line = line.strip() - if not line: - continue - try: - obj = json.loads(line) - if isinstance(obj, dict) and "seq" in obj: - last_seq = max(last_seq, obj["seq"]) - except (json.JSONDecodeError, TypeError): - continue - return last_seq - - -def _maybe_rotate(outbox: Path) -> None: - """Rotate the outbox file if it exceeds the size threshold.""" - if not outbox.exists(): - return - try: - size = outbox.stat().st_size + with lock_path.open("a+", encoding="utf-8") as lock_fh: + fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX) + seq = 1 + if 
outbox_path.exists(): + with outbox_path.open("r", encoding="utf-8") as existing: + for line in existing: + line = line.strip() + if not line: + continue + try: + row = json.loads(line) + except json.JSONDecodeError: + continue + value = int(row.get("seq", 0)) + if value >= seq: + seq = value + 1 + event = { + "version": 1, + "seq": seq, + "event_id": os.urandom(8).hex(), + "timestamp": _now_utc(), + **payload, + } + with outbox_path.open("a", encoding="utf-8") as fh: + fh.write(json.dumps(event) + "\n") + fcntl.flock(lock_fh.fileno(), fcntl.LOCK_UN) except OSError: return - if size <= _ROTATION_THRESHOLD: - return - ts = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%S") - archive = outbox.parent / f"outbox.{ts}.jsonl" - outbox.rename(archive) - - -def emit( - event_type: EventType, - workspace_root: Path, - actor: str, - owner_unit: str, - payload: dict[str, object], - *, - agent_id: str | None = None, -) -> None: - """Emit a single event to the workspace outbox. - - Builds a flat JSON object from envelope + context + payload fields and - appends it as one line to .grip/events/outbox.jsonl. - - Does not raise on write failure (section 10.1). Errors are logged to - stderr so the parent operation can continue. - """ - # Validate payload keys against reserved names. - collisions = _RESERVED_NAMES & payload.keys() - if collisions: - raise ValueError( - f"payload keys collide with reserved envelope/context names: {collisions}" - ) - - try: - outbox = _outbox_path(workspace_root) - outbox.parent.mkdir(parents=True, exist_ok=True) - lock_path = outbox.with_suffix(".lock") - - with lock_path.open("a+") as lock_fh: - fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX) - try: - # Capture seq before rotation (rotation empties the current file). - seq = _current_seq(outbox) + 1 - _maybe_rotate(outbox) - - # Build flat event object. 
- event: dict[str, object] = { - "version": 1, - "event_id": os.urandom(8).hex(), - "seq": seq, - "timestamp": datetime.now(timezone.utc).isoformat(), - "type": str(event_type.value), - "workspace": workspace_root.name, - "actor": actor, - "owner_unit": owner_unit, - } - if agent_id is not None: - event["agent_id"] = agent_id - event.update(payload) - - # Append as single JSONL line. - with outbox.open("a") as f: - f.write(json.dumps(event, separators=(",", ":")) + "\n") - f.flush() - finally: - fcntl.flock(lock_fh.fileno(), fcntl.LOCK_UN) - - except Exception as exc: - print(f"gr2: event emit failed: {exc}", file=sys.stderr) - - -def read_events(workspace_root: Path, consumer: str) -> list[dict[str, object]]: - """Read new events from the outbox for the named consumer. - - Returns events with seq > cursor's last_seq. Updates the cursor file - atomically after reading. - """ - outbox = _outbox_path(workspace_root) - if not outbox.exists(): - return [] - - cursor = _load_cursor(workspace_root, consumer) - last_seq = cursor.get("last_seq", 0) - - events: list[dict[str, object]] = [] - text = outbox.read_text() - for line in text.strip().split("\n"): - line = line.strip() - if not line: - continue - try: - obj = json.loads(line) - except json.JSONDecodeError: - continue - if not isinstance(obj, dict): - continue - if obj.get("seq", 0) <= last_seq: - continue - events.append(obj) - - if events: - last_event = events[-1] - _save_cursor(workspace_root, consumer, { - "consumer": consumer, - "last_seq": last_event["seq"], - "last_event_id": last_event.get("event_id", ""), - "last_read": datetime.now(timezone.utc).isoformat(), - }) - - return events - - -def _load_cursor(workspace_root: Path, consumer: str) -> dict[str, object]: - cursor_file = _cursors_dir(workspace_root) / f"{consumer}.json" - if not cursor_file.exists(): - return {} - try: - return json.loads(cursor_file.read_text()) - except (json.JSONDecodeError, OSError): - return {} - - -def 
_save_cursor(workspace_root: Path, consumer: str, data: dict[str, object]) -> None: - cursors = _cursors_dir(workspace_root) - cursors.mkdir(parents=True, exist_ok=True) - cursor_file = cursors / f"{consumer}.json" - tmp = cursor_file.with_suffix(".tmp") - tmp.write_text(json.dumps(data, indent=2)) - tmp.rename(cursor_file) diff --git a/gr2/python_cli/hooks.py b/gr2/python_cli/hooks.py index 41c4bf1..08ea9dd 100644 --- a/gr2/python_cli/hooks.py +++ b/gr2/python_cli/hooks.py @@ -84,6 +84,8 @@ class HookResult: returncode: int | None = None stdout: str | None = None stderr: str | None = None + src: str | None = None + dest: str | None = None def as_dict(self) -> dict[str, object]: return dataclasses.asdict(self) @@ -206,6 +208,8 @@ def apply_file_projections(hooks: RepoHooks, ctx: HookContext) -> list[HookResul name=f"{item.kind}:{dest.name}", status="skipped", detail=f"destination already exists and if_exists=skip: {dest}", + src=str(src), + dest=str(dest), ) ) continue @@ -258,6 +262,8 @@ def apply_file_projections(hooks: RepoHooks, ctx: HookContext) -> list[HookResul name=f"{item.kind}:{dest.name}", status="applied", detail=f"{item.kind} {src} -> {dest}", + src=str(src), + dest=str(dest), ) ) return results diff --git a/gr2/python_cli/spec_apply.py b/gr2/python_cli/spec_apply.py index c39214a..593bd4d 100644 --- a/gr2/python_cli/spec_apply.py +++ b/gr2/python_cli/spec_apply.py @@ -2,10 +2,12 @@ import dataclasses import json +import os import tomllib from datetime import UTC, datetime from pathlib import Path +from .events import append_outbox_event from .gitops import clone_repo, ensure_repo_cache, is_git_dir, is_git_repo, repo_dirty from .hooks import HookContext, apply_file_projections, load_repo_hooks, run_lifecycle_stage @@ -264,19 +266,35 @@ def apply_plan(workspace_root: Path, *, yes: bool, manual_hooks: bool = False) - raise SystemExit("plan contains more than 3 operations; rerun with --yes to apply it") applied: list[str] = [] + materialized_repos: 
list[dict[str, object]] = [] for op in operations: if op.kind == "clone_repo": repo_spec = _find_repo(spec, op.subject) repo_root = workspace_root / str(repo_spec["path"]) cache_path = repo_cache_path(workspace_root, str(repo_spec["name"])) first_materialize = clone_repo(str(repo_spec["url"]), repo_root, reference_repo_root=cache_path) - _run_materialize_hooks( + hook_payload = _run_materialize_hooks( workspace_root, repo_root, str(repo_spec["name"]), first_materialize, manual_hooks=manual_hooks, ) + for projection in hook_payload["projected_files"]: + append_outbox_event( + workspace_root, + { + "type": "workspace.file_projected", + "workspace": workspace_root.name, + "actor": "system", + "owner_unit": "workspace", + "repo": str(repo_spec["name"]), + "kind": projection["kind"], + "src": projection["src"], + "dest": projection["dest"], + }, + ) + materialized_repos.append({"repo": str(repo_spec["name"]), "first_materialize": first_materialize}) applied.append(f"cloned repo '{op.subject}' into {repo_root}") elif op.kind == "seed_repo_cache": repo_spec = _find_repo(spec, op.subject) @@ -302,6 +320,17 @@ def apply_plan(workspace_root: Path, *, yes: bool, manual_hooks: bool = False) - if applied: _record_apply_state(workspace_root, applied) + if materialized_repos: + append_outbox_event( + workspace_root, + { + "type": "workspace.materialized", + "workspace": workspace_root.name, + "actor": "system", + "owner_unit": "workspace", + "repos": materialized_repos, + }, + ) return { "workspace_root": str(workspace_root), @@ -342,10 +371,10 @@ def _run_materialize_hooks( first_materialize: bool, *, manual_hooks: bool = False, -) -> None: +) -> dict[str, list[dict[str, object]]]: hooks = load_repo_hooks(repo_root) if not hooks: - return + return {"projected_files": []} ctx = HookContext( workspace_root=workspace_root, lane_root=repo_root, @@ -355,7 +384,7 @@ def _run_materialize_hooks( lane_subject=repo_name, lane_name="workspace", ) - apply_file_projections(hooks, ctx) + 
projections = apply_file_projections(hooks, ctx) run_lifecycle_stage( hooks, "on_materialize", @@ -364,6 +393,22 @@ def _run_materialize_hooks( first_materialize=first_materialize, allow_manual=manual_hooks, ) + projected_files: list[dict[str, object]] = [] + for result in projections: + if result.status != "applied" or not result.src or not result.dest: + continue + projected_files.append( + { + "kind": result.name.split(":", 1)[0], + "src": _relative_workspace_path(workspace_root, Path(result.src)), + "dest": _relative_workspace_path(workspace_root, Path(result.dest)), + } + ) + return {"projected_files": projected_files} + + +def _relative_workspace_path(workspace_root: Path, path: Path) -> str: + return os.path.relpath(path, workspace_root) def render_unit_toml(unit_spec: dict[str, object]) -> str: diff --git a/gr2/python_cli/syncops.py b/gr2/python_cli/syncops.py index 0b56d00..e4f8434 100644 --- a/gr2/python_cli/syncops.py +++ b/gr2/python_cli/syncops.py @@ -23,6 +23,7 @@ repo_dirty, stash_if_dirty, ) +from .events import append_outbox_event from .hooks import load_repo_hooks from .spec_apply import ( ValidationIssue, @@ -198,60 +199,12 @@ def _operation_id() -> str: return os.urandom(8).hex() -def _now_utc() -> str: - return datetime.now(UTC).isoformat() - - -def _events_dir(workspace_root: Path) -> Path: - return workspace_root / ".grip" / "events" - - -def _outbox_file(workspace_root: Path) -> Path: - return _events_dir(workspace_root) / "outbox.jsonl" - - -def _outbox_lock_file(workspace_root: Path) -> Path: - return _events_dir(workspace_root) / "outbox.lock" - - def _sync_lock_file(workspace_root: Path) -> Path: return workspace_root / ".grip" / "state" / "sync.lock" def _append_outbox_event(workspace_root: Path, payload: dict[str, object]) -> None: - outbox_path = _outbox_file(workspace_root) - lock_path = _outbox_lock_file(workspace_root) - outbox_path.parent.mkdir(parents=True, exist_ok=True) - lock_path.parent.mkdir(parents=True, exist_ok=True) - 
try: - with lock_path.open("a+", encoding="utf-8") as lock_fh: - fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX) - seq = 1 - if outbox_path.exists(): - with outbox_path.open("r", encoding="utf-8") as existing: - for line in existing: - line = line.strip() - if not line: - continue - try: - row = json.loads(line) - except json.JSONDecodeError: - continue - value = int(row.get("seq", 0)) - if value >= seq: - seq = value + 1 - event = { - "version": 1, - "seq": seq, - "event_id": os.urandom(8).hex(), - "timestamp": _now_utc(), - **payload, - } - with outbox_path.open("a", encoding="utf-8") as fh: - fh.write(json.dumps(event) + "\n") - fcntl.flock(lock_fh.fileno(), fcntl.LOCK_UN) - except OSError: - return + append_outbox_event(workspace_root, payload) def _emit_sync_event(workspace_root: Path, payload: dict[str, object]) -> None: From 584b611e02d873291b49c021d8ef323d575b313d Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 12:51:41 -0500 Subject: [PATCH 16/18] test: add failure marker and sync conflict coverage --- gr2/tests/test_sprint21_sync_platform.py | 148 +++++++++++++++++++++++ 1 file changed, 148 insertions(+) diff --git a/gr2/tests/test_sprint21_sync_platform.py b/gr2/tests/test_sprint21_sync_platform.py index 4e8bf0b..bf81421 100644 --- a/gr2/tests/test_sprint21_sync_platform.py +++ b/gr2/tests/test_sprint21_sync_platform.py @@ -88,6 +88,42 @@ def _init_bare_remote_with_projection_hook(tmp_path: Path, name: str) -> tuple[P return remote, remote.as_uri() +def _init_bare_remote_with_failing_on_enter_hook(tmp_path: Path, name: str) -> tuple[Path, str]: + source = tmp_path / f"{name}-src" + source.mkdir(parents=True, exist_ok=True) + assert _git(source, "init", "-b", "main").returncode == 0 + assert _git(source, "config", "user.name", "Atlas").returncode == 0 + assert _git(source, "config", "user.email", "atlas@example.com").returncode == 0 + (source / "README.md").write_text(f"# {name}\n") + (source / ".gr2").mkdir(parents=True, exist_ok=True) + (source 
/ ".gr2" / "hooks.toml").write_text( + textwrap.dedent( + """ + [repo] + name = "app" + + [[lifecycle.on_enter]] + name = "boom" + command = "sh -c 'echo hook boom >&2; exit 7'" + when = "always" + on_failure = "block" + """ + ).strip() + + "\n" + ) + assert _git(source, "add", "README.md", ".gr2/hooks.toml").returncode == 0 + assert _git(source, "commit", "-m", "initial").returncode == 0 + + remote = tmp_path / f"{name}.git" + assert subprocess.run( + ["git", "clone", "--bare", str(source), str(remote)], + capture_output=True, + text=True, + check=False, + ).returncode == 0 + return remote, remote.as_uri() + + def _write_workspace_spec(workspace_root: Path, repo_name: str, repo_url: str) -> None: spec_path = workspace_root / ".grip" / "workspace_spec.toml" spec_path.parent.mkdir(parents=True, exist_ok=True) @@ -315,6 +351,118 @@ def test_workspace_materialize_emits_file_projected_event(tmp_path: Path) -> Non assert projected["dest"] == "repos/app/CLAUDE.md" +def test_lane_enter_hook_failure_writes_marker_and_resolve_emits_event(tmp_path: Path) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, repo_url = _init_bare_remote_with_failing_on_enter_hook(tmp_path, "app") + _write_workspace_spec(workspace_root, "app", repo_url) + + materialize = runner.invoke(app, ["workspace", "materialize", str(workspace_root), "--yes", "--json"]) + assert materialize.exit_code == 0 + + create = runner.invoke( + app, + [ + "lane", + "create", + str(workspace_root), + "atlas", + "feat-auth", + "--repos", + "app", + "--branch", + "feat/auth", + ], + ) + assert create.exit_code == 0 + + enter = runner.invoke( + app, + ["lane", "enter", str(workspace_root), "atlas", "feat-auth", "--actor", "agent:atlas"], + ) + assert enter.exit_code != 0 + + failures_root = workspace_root / ".grip" / "state" / "failures" + markers = sorted(failures_root.glob("*.json")) + assert len(markers) == 1 + marker = json.loads(markers[0].read_text()) + assert marker["operation"] == 
"lane.enter" + assert marker["stage"] == "on_enter" + assert marker["hook_name"] == "boom" + assert marker["repo"] == "app" + assert marker["owner_unit"] == "atlas" + assert marker["lane_name"] == "feat-auth" + assert marker["resolved"] is False + + blocked = runner.invoke( + app, + ["lane", "enter", str(workspace_root), "atlas", "feat-auth", "--actor", "agent:atlas"], + ) + assert blocked.exit_code != 0 + assert marker["operation_id"] in blocked.stdout + + resolve = runner.invoke( + app, + [ + "lane", + "resolve", + str(workspace_root), + "atlas", + marker["operation_id"], + "--actor", + "agent:atlas", + "--resolution", + "retry", + "--json", + ], + ) + assert resolve.exit_code == 0 + resolved_payload = json.loads(resolve.stdout) + assert resolved_payload["operation_id"] == marker["operation_id"] + assert not markers[0].exists() + + outbox = _read_outbox(workspace_root) + resolved = next(row for row in outbox if row["type"] == "failure.resolved") + assert resolved["operation_id"] == marker["operation_id"] + assert resolved["resolved_by"] == "agent:atlas" + assert resolved["resolution"] == "retry" + assert resolved["lane_name"] == "feat-auth" + + +def test_sync_conflict_emits_conflicting_files_for_unmerged_repo(tmp_path: Path) -> None: + workspace_root = tmp_path / "workspace" + workspace_root.mkdir() + _, repo_url = _init_bare_remote(tmp_path, "app") + _write_workspace_spec(workspace_root, "app", repo_url) + run_sync(workspace_root) + + repo_root = workspace_root / "repos" / "app" + assert _git(repo_root, "config", "user.name", "Atlas").returncode == 0 + assert _git(repo_root, "config", "user.email", "atlas@example.com").returncode == 0 + + (repo_root / "README.md").write_text("left\n") + assert _git(repo_root, "commit", "-am", "left").returncode == 0 + assert _git(repo_root, "checkout", "-b", "other", "HEAD~1").returncode == 0 + (repo_root / "README.md").write_text("right\n") + assert _git(repo_root, "commit", "-am", "right").returncode == 0 + assert 
_git(repo_root, "checkout", "main").returncode == 0 + + merge = _git(repo_root, "merge", "other") + assert merge.returncode != 0 + assert str(repo_root / "README.md").endswith("README.md") + + result = runner.invoke(app, ["sync", "run", str(workspace_root), "--dirty", "block", "--json"]) + assert result.exit_code == 1 + + outbox = _read_outbox(workspace_root) + conflict = next( + row + for row in outbox + if row["type"] == "sync.conflict" and row.get("repo") == "app" and row.get("conflicting_files") + ) + assert conflict["conflicting_files"] == ["README.md"] + + def test_pr_command_group_exists_in_python_cli() -> None: result = runner.invoke(app, ["pr", "--help"]) assert result.exit_code == 0 From 4ebd5789ab81ae22dae09fa3e6b0e021c493a630 Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 12:53:35 -0500 Subject: [PATCH 17/18] feat: add failure markers and sync conflict details --- gr2/python_cli/app.py | 62 +++++++++++++++++++++-- gr2/python_cli/events.py | 5 +- gr2/python_cli/failures.py | 100 +++++++++++++++++++++++++++++++++++++ gr2/python_cli/gitops.py | 7 +++ gr2/python_cli/syncops.py | 25 +++++++++- 5 files changed, 192 insertions(+), 7 deletions(-) create mode 100644 gr2/python_cli/failures.py diff --git a/gr2/python_cli/app.py b/gr2/python_cli/app.py index 4195644..c7969da 100644 --- a/gr2/python_cli/app.py +++ b/gr2/python_cli/app.py @@ -11,6 +11,7 @@ import typer from . import execops +from . import failures from . import migration from . import syncops from .gitops import ( @@ -25,8 +26,8 @@ stash_if_dirty, ) from .events import emit, EventType -from .hooks import HookContext, apply_file_projections, load_repo_hooks, run_lifecycle_stage -from .platform import CreatePRRequest, PRRef, get_platform_adapter +from .hooks import HookContext, HookRuntimeError, apply_file_projections, load_repo_hooks, run_lifecycle_stage +from .platform import PRRef, get_platform_adapter from . 
import spec_apply from gr2.prototypes import lane_workspace_prototype as lane_proto from gr2.prototypes import repo_maintenance_prototype as repo_proto @@ -706,7 +707,38 @@ def lane_enter( ) -> None: """Enter a lane and optionally emit channel/recall-compatible events.""" workspace_root = workspace_root.resolve() - _run_lane_stage(workspace_root, owner_unit, lane_name, "on_enter", manual_hooks=manual_hooks) + unresolved = failures.unresolved_lane_failure(workspace_root, owner_unit, lane_name) + if unresolved: + typer.echo( + json.dumps( + { + "status": "blocked", + "code": "unresolved_failure_marker", + "operation_id": unresolved["operation_id"], + "lane_name": lane_name, + }, + indent=2, + ) + ) + raise typer.Exit(code=1) + try: + _run_lane_stage(workspace_root, owner_unit, lane_name, "on_enter", manual_hooks=manual_hooks) + except HookRuntimeError as exc: + payload = exc.payload + repo_name = Path(str(payload.get("cwd", ""))).name or lane_name + event = failures.write_failure_marker( + workspace_root, + operation="lane.enter", + stage=str(payload.get("stage", "on_enter")), + hook_name=str(payload.get("hook", payload.get("name", "unknown"))), + repo=repo_name, + owner_unit=owner_unit, + lane_name=lane_name, + partial_state={}, + event_id=None, + ) + typer.echo(json.dumps(event, indent=2)) + raise typer.Exit(code=1) ns = SimpleNamespace( workspace_root=workspace_root, owner_unit=owner_unit, @@ -730,6 +762,30 @@ def lane_enter( ) +@lane_app.command("resolve") +def lane_resolve( + workspace_root: Path, + owner_unit: str, + operation_id: str, + actor: str = typer.Option(..., help="Actor label, e.g. 
agent:atlas"), + resolution: str = typer.Option(..., help="Resolution note: retry | skip | escalate"), + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Resolve a blocking failure marker for a lane-scoped operation.""" + workspace_root = workspace_root.resolve() + payload = failures.resolve_failure_marker( + workspace_root, + operation_id=operation_id, + resolved_by=actor, + resolution=resolution, + owner_unit=owner_unit, + ) + if json_output: + typer.echo(json.dumps(payload, indent=2)) + else: + typer.echo(json.dumps(payload, indent=2)) + + @lane_app.command("exit") def lane_exit( workspace_root: Path, diff --git a/gr2/python_cli/events.py b/gr2/python_cli/events.py index 6ba47b8..7cfa9ad 100644 --- a/gr2/python_cli/events.py +++ b/gr2/python_cli/events.py @@ -23,7 +23,7 @@ def _outbox_lock_file(workspace_root: Path) -> Path: return _events_dir(workspace_root) / "outbox.lock" -def append_outbox_event(workspace_root: Path, payload: dict[str, object]) -> None: +def append_outbox_event(workspace_root: Path, payload: dict[str, object]) -> dict[str, object] | None: outbox_path = _outbox_file(workspace_root) lock_path = _outbox_lock_file(workspace_root) outbox_path.parent.mkdir(parents=True, exist_ok=True) @@ -55,5 +55,6 @@ def append_outbox_event(workspace_root: Path, payload: dict[str, object]) -> Non with outbox_path.open("a", encoding="utf-8") as fh: fh.write(json.dumps(event) + "\n") fcntl.flock(lock_fh.fileno(), fcntl.LOCK_UN) + return event except OSError: - return + return None diff --git a/gr2/python_cli/failures.py b/gr2/python_cli/failures.py new file mode 100644 index 0000000..c681774 --- /dev/null +++ b/gr2/python_cli/failures.py @@ -0,0 +1,100 @@ +from __future__ import annotations + +import json +import os +from datetime import UTC, datetime +from pathlib import Path + +from .events import append_outbox_event + + +def _now_utc() -> str: + return datetime.now(UTC).isoformat() + + +def 
failures_root(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "state" / "failures" + + +def failure_marker_path(workspace_root: Path, operation_id: str) -> Path: + return failures_root(workspace_root) / f"{operation_id}.json" + + +def unresolved_lane_failure(workspace_root: Path, owner_unit: str, lane_name: str) -> dict[str, object] | None: + root = failures_root(workspace_root) + if not root.exists(): + return None + for path in sorted(root.glob("*.json")): + doc = json.loads(path.read_text()) + if doc.get("resolved") is True: + continue + if doc.get("owner_unit") == owner_unit and doc.get("lane_name") == lane_name: + return doc + return None + + +def write_failure_marker( + workspace_root: Path, + *, + operation: str, + stage: str, + hook_name: str, + repo: str, + owner_unit: str, + lane_name: str, + partial_state: dict[str, object] | None = None, + event_id: str | None = None, +) -> dict[str, object]: + operation_id = f"op_{os.urandom(4).hex()}" + marker = { + "operation_id": operation_id, + "operation": operation, + "stage": stage, + "hook_name": hook_name, + "repo": repo, + "owner_unit": owner_unit, + "lane_name": lane_name, + "failed_at": _now_utc(), + "event_id": event_id, + "partial_state": partial_state or {}, + "resolved": False, + } + path = failure_marker_path(workspace_root, operation_id) + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(marker, indent=2) + "\n") + return marker + + +def resolve_failure_marker( + workspace_root: Path, + *, + operation_id: str, + resolved_by: str, + resolution: str, + owner_unit: str, +) -> dict[str, object]: + path = failure_marker_path(workspace_root, operation_id) + if not path.exists(): + raise SystemExit(f"failure marker not found: {operation_id}") + marker = json.loads(path.read_text()) + path.unlink() + event = append_outbox_event( + workspace_root, + { + "type": "failure.resolved", + "workspace": workspace_root.name, + "actor": resolved_by, + "owner_unit": 
owner_unit, + "operation_id": operation_id, + "resolved_by": resolved_by, + "resolution": resolution, + "lane_name": marker.get("lane_name", ""), + }, + ) + return { + "operation_id": operation_id, + "resolved_by": resolved_by, + "resolution": resolution, + "lane_name": marker.get("lane_name", ""), + "event_id": None if event is None else event["event_id"], + } diff --git a/gr2/python_cli/gitops.py b/gr2/python_cli/gitops.py index 2a31c49..832026f 100644 --- a/gr2/python_cli/gitops.py +++ b/gr2/python_cli/gitops.py @@ -55,6 +55,13 @@ def commits_between(path: Path, old_sha: str | None, new_sha: str | None) -> int return 0 +def conflicting_files(path: Path) -> list[str]: + proc = git(path, "diff", "--name-only", "--diff-filter=U") + if proc.returncode != 0: + return [] + return [line.strip() for line in proc.stdout.splitlines() if line.strip()] + + def ensure_repo_cache(url: str, cache_repo_root: Path) -> bool: """Ensure a local bare mirror exists for a repo URL. diff --git a/gr2/python_cli/syncops.py b/gr2/python_cli/syncops.py index e4f8434..55815a0 100644 --- a/gr2/python_cli/syncops.py +++ b/gr2/python_cli/syncops.py @@ -13,6 +13,7 @@ from .gitops import ( clone_repo, commits_between, + conflicting_files, current_branch, current_head_sha, discard_if_dirty, @@ -318,6 +319,7 @@ def build_sync_plan(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncP ) else: if repo_dirty(repo_root): + repo_conflicts = conflicting_files(repo_root) if dirty_mode == "block": issues.append( SyncIssue( @@ -328,7 +330,7 @@ def build_sync_plan(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncP message=f"shared repo has uncommitted changes and blocks sync: {repo_root}", blocks=True, path=str(repo_root), - details={"dirty_mode": dirty_mode}, + details={"dirty_mode": dirty_mode, "conflicting_files": repo_conflicts}, ) ) else: @@ -424,6 +426,7 @@ def build_sync_plan(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncP ) continue if repo_dirty(lane_repo_root): + 
repo_conflicts = conflicting_files(lane_repo_root) if dirty_mode == "block": issues.append( SyncIssue( @@ -434,7 +437,11 @@ def build_sync_plan(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncP message=f"lane repo has uncommitted changes and blocks sync: {lane_repo_root}", blocks=True, path=str(lane_repo_root), - details={"expected_branch": expected_branch, "dirty_mode": dirty_mode}, + details={ + "expected_branch": expected_branch, + "dirty_mode": dirty_mode, + "conflicting_files": repo_conflicts, + }, ) ) else: @@ -732,6 +739,20 @@ def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult: "conflicting_files": [], }, ) + elif issue.code in {"dirty_shared_repo", "dirty_lane_repo"} and issue.details.get("conflicting_files"): + repo_name = issue.subject.split(":")[-1] + _emit_sync_event( + workspace_root, + { + "type": "sync.conflict", + **_sync_context( + workspace_root, + owner_unit=issue.subject.split("/", 1)[0] if issue.scope == "lane" else "workspace", + ), + "repo": repo_name, + "conflicting_files": list(issue.details.get("conflicting_files", [])), + }, + ) _emit_sync_event( workspace_root, { From e6ff502e400d9509232310abefbe795d6ca84ee5 Mon Sep 17 00:00:00 2001 From: Atlas Date: Wed, 15 Apr 2026 13:03:31 -0500 Subject: [PATCH 18/18] feat: converge on shared events and pr orchestration --- gr2/python_cli/app.py | 129 ++++++------ gr2/python_cli/events.py | 243 +++++++++++++++++++---- gr2/python_cli/failures.py | 16 +- gr2/python_cli/pr.py | 35 +--- gr2/python_cli/spec_apply.py | 30 ++- gr2/python_cli/syncops.py | 18 +- gr2/tests/test_sprint21_sync_platform.py | 22 +- 7 files changed, 321 insertions(+), 172 deletions(-) diff --git a/gr2/python_cli/app.py b/gr2/python_cli/app.py index c7969da..6e9ed4b 100644 --- a/gr2/python_cli/app.py +++ b/gr2/python_cli/app.py @@ -13,6 +13,7 @@ from . import execops from . import failures from . import migration +from . import pr as pr_ops from . 
import syncops from .gitops import ( branch_exists, @@ -208,24 +209,8 @@ def _resolve_lane_name(workspace_root: Path, owner_unit: str, lane_name: Optiona return str(current_doc["current"]["lane_name"]) -def _pr_groups_root(workspace_root: Path) -> Path: - return workspace_root / ".grip" / "pr_groups" - - -def _pr_group_path(workspace_root: Path, pr_group_id: str) -> Path: - return _pr_groups_root(workspace_root) / f"{pr_group_id}.json" - - -def _write_pr_group(workspace_root: Path, payload: dict[str, object]) -> Path: - pr_group_id = str(payload["pr_group_id"]) - path = _pr_group_path(workspace_root, pr_group_id) - path.parent.mkdir(parents=True, exist_ok=True) - path.write_text(json.dumps(payload, indent=2) + "\n") - return path - - def _find_pr_group(workspace_root: Path, owner_unit: str, lane_name: str) -> tuple[Path, dict[str, object]]: - root = _pr_groups_root(workspace_root) + root = workspace_root / ".grip" / "pr_groups" if not root.exists(): raise SystemExit(f"pr group not found for {owner_unit}/{lane_name}: {root}") for path in sorted(root.glob("*.json")): @@ -1007,31 +992,24 @@ def pr_create( lane_doc = lane_proto.load_lane_doc(workspace_root, owner_unit, resolved_lane) spec = lane_proto.load_workspace_spec(workspace_root) adapter = get_platform_adapter(platform) - pr_group_id = f"pg_{os.urandom(4).hex()}" - refs: list[dict[str, object]] = [] branch_map = dict(lane_doc.get("branch_map", {})) + repos: list[str] = [] for repo_name in lane_doc.get("repos", []): repo_spec = next(repo for repo in spec.get("repos", []) if repo.get("name") == repo_name) - request = CreatePRRequest( - repo=_repo_slug_from_url(str(repo_spec.get("url", "")), repo_name), - title=resolved_lane, - body=f"gr2 PR group {pr_group_id} for {owner_unit}/{resolved_lane}", - head_branch=str(branch_map.get(repo_name, resolved_lane)), - base_branch=base_branch, - draft=draft, - ) - ref = adapter.create_pr(request) - refs.append(ref.as_dict()) - payload = { - "pr_group_id": pr_group_id, - 
"owner_unit": owner_unit, - "lane_name": resolved_lane, - "platform": platform, - "refs": refs, - "group_state": "open", - } - path = _write_pr_group(workspace_root, payload) - payload["state_path"] = str(path) + repos.append(_repo_slug_from_url(str(repo_spec.get("url", "")), repo_name)) + payload = pr_ops.create_pr_group( + workspace_root=workspace_root, + owner_unit=owner_unit, + lane_name=resolved_lane, + title=resolved_lane, + base_branch=base_branch, + head_branch=str(branch_map.get(next(iter(lane_doc.get("repos", [])), resolved_lane), resolved_lane)), + repos=repos, + adapter=adapter, + actor=f"agent:{owner_unit}", + body=f"gr2 PR group for {owner_unit}/{resolved_lane}", + draft=draft, + ) if json_output: typer.echo(json.dumps(payload, indent=2)) else: @@ -1050,18 +1028,22 @@ def pr_status( resolved_lane = _resolve_lane_name(workspace_root, owner_unit, lane_name) group_path, group = _find_pr_group(workspace_root, owner_unit, resolved_lane) adapter = get_platform_adapter(str(group.get("platform", "github"))) + group = pr_ops.check_pr_group_status( + workspace_root=workspace_root, + pr_group_id=str(group["pr_group_id"]), + adapter=adapter, + actor=f"agent:{owner_unit}", + ) statuses = [] - for ref_doc in group.get("refs", []): - ref = PRRef(**ref_doc) - statuses.append(adapter.pr_status(ref.repo, int(ref.number)).as_dict()) - group["statuses"] = statuses - group["group_state"] = _group_state_from_statuses(statuses) - _write_pr_group(workspace_root, group) + for pr_info in group.get("prs", []): + repo = str(pr_info["repo"]) + number = int(pr_info["pr_number"]) + statuses.append(adapter.pr_status(repo, number).as_dict()) payload = { "pr_group_id": group["pr_group_id"], "owner_unit": owner_unit, "lane_name": resolved_lane, - "group_state": group["group_state"], + "group_state": _group_state_from_statuses(statuses), "statuses": statuses, "state_path": str(group_path), } @@ -1084,8 +1066,8 @@ def pr_checks( group_path, group = _find_pr_group(workspace_root, 
owner_unit, resolved_lane) adapter = get_platform_adapter(str(group.get("platform", "github"))) rows = [] - for ref_doc in group.get("refs", []): - ref = PRRef(**ref_doc) + for pr_info in group.get("prs", []): + ref = PRRef(repo=str(pr_info["repo"]), number=int(pr_info["pr_number"]), url=pr_info.get("url")) rows.append( { "repo": ref.repo, @@ -1120,18 +1102,10 @@ def pr_merge( adapter = get_platform_adapter(str(group.get("platform", "github"))) merged: list[str] = [] failed: list[dict[str, object]] = [] - for ref_doc in group.get("refs", []): - ref = PRRef(**ref_doc) - try: - adapter.merge_pr(ref.repo, int(ref.number)) - merged.append(ref.repo) - except Exception as exc: - failed.append({"repo": ref.repo, "number": ref.number, "reason": str(exc)}) - break if failed: group["group_state"] = "partially_merged" if merged else "merge_failed" group["merged"] = merged - _write_pr_group(workspace_root, group) + group_path.write_text(json.dumps(group, indent=2) + "\n") payload = { "status": "partial_failure" if merged else "failed", "pr_group_id": group["pr_group_id"], @@ -1146,13 +1120,40 @@ def pr_merge( else: typer.echo(json.dumps(payload, indent=2)) raise typer.Exit(code=1) - payload = { - "pr_group_id": group["pr_group_id"], - "owner_unit": owner_unit, - "lane_name": resolved_lane, - "merged": merged, - "state_path": str(group_path), - } + try: + pr_ops.merge_pr_group( + workspace_root=workspace_root, + pr_group_id=str(group["pr_group_id"]), + adapter=adapter, + actor=f"agent:{owner_unit}", + ) + merged = [str(pr_info["repo"]) for pr_info in group.get("prs", [])] + payload = { + "pr_group_id": group["pr_group_id"], + "owner_unit": owner_unit, + "lane_name": resolved_lane, + "merged": merged, + "state_path": str(group_path), + } + except pr_ops.PRMergeError as exc: + merged = [str(pr_info["repo"]) for pr_info in group.get("prs", []) if str(pr_info["repo"]) != exc.repo] + group["group_state"] = "partially_merged" if merged else "merge_failed" + group["merged"] = merged + 
group_path.write_text(json.dumps(group, indent=2) + "\n") + payload = { + "status": "partial_failure" if merged else "failed", + "pr_group_id": group["pr_group_id"], + "owner_unit": owner_unit, + "lane_name": resolved_lane, + "merged": merged, + "failed": [{"repo": exc.repo, "number": exc.pr_number, "reason": exc.reason}], + "state_path": str(group_path), + } + if json_output: + typer.echo(json.dumps(payload, indent=2)) + else: + typer.echo(json.dumps(payload, indent=2)) + raise typer.Exit(code=1) if json_output: typer.echo(json.dumps(payload, indent=2)) else: diff --git a/gr2/python_cli/events.py b/gr2/python_cli/events.py index 7cfa9ad..9f51ab9 100644 --- a/gr2/python_cli/events.py +++ b/gr2/python_cli/events.py @@ -1,60 +1,217 @@ +"""gr2 event system runtime. + +Implements the event contract from HOOK-EVENT-CONTRACT.md sections 3-8: +- EventType enum (section 7.2) +- emit() function (sections 4.2, 7.1) +- Outbox management with rotation (sections 4.1-4.4) +- Cursor-based consumer model (section 5.1) +""" from __future__ import annotations -import fcntl import json import os -from datetime import UTC, datetime +import sys +from datetime import datetime, timezone +from enum import Enum from pathlib import Path -def _now_utc() -> str: - return datetime.now(UTC).isoformat() +_RESERVED_NAMES = frozenset( + { + "version", + "event_id", + "seq", + "timestamp", + "type", + "workspace", + "actor", + "agent_id", + "owner_unit", + } +) + +_ROTATION_THRESHOLD = 10 * 1024 * 1024 + + +class EventType(str, Enum): + LANE_CREATED = "lane.created" + LANE_ENTERED = "lane.entered" + LANE_EXITED = "lane.exited" + LANE_SWITCHED = "lane.switched" + LANE_ARCHIVED = "lane.archived" + + LEASE_ACQUIRED = "lease.acquired" + LEASE_RELEASED = "lease.released" + LEASE_EXPIRED = "lease.expired" + LEASE_FORCE_BROKEN = "lease.force_broken" + + HOOK_STARTED = "hook.started" + HOOK_COMPLETED = "hook.completed" + HOOK_FAILED = "hook.failed" + HOOK_SKIPPED = "hook.skipped" + + PR_CREATED = 
"pr.created" + PR_STATUS_CHANGED = "pr.status_changed" + PR_CHECKS_PASSED = "pr.checks_passed" + PR_CHECKS_FAILED = "pr.checks_failed" + PR_REVIEW_SUBMITTED = "pr.review_submitted" + PR_MERGED = "pr.merged" + PR_MERGE_FAILED = "pr.merge_failed" + + SYNC_STARTED = "sync.started" + SYNC_CACHE_SEEDED = "sync.cache_seeded" + SYNC_CACHE_REFRESHED = "sync.cache_refreshed" + SYNC_REPO_UPDATED = "sync.repo_updated" + SYNC_REPO_SKIPPED = "sync.repo_skipped" + SYNC_CONFLICT = "sync.conflict" + SYNC_COMPLETED = "sync.completed" + FAILURE_RESOLVED = "failure.resolved" + LEASE_RECLAIMED = "lease.reclaimed" -def _events_dir(workspace_root: Path) -> Path: - return workspace_root / ".grip" / "events" + WORKSPACE_MATERIALIZED = "workspace.materialized" + WORKSPACE_FILE_PROJECTED = "workspace.file_projected" -def _outbox_file(workspace_root: Path) -> Path: - return _events_dir(workspace_root) / "outbox.jsonl" +def _outbox_path(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "events" / "outbox.jsonl" -def _outbox_lock_file(workspace_root: Path) -> Path: - return _events_dir(workspace_root) / "outbox.lock" +def _cursors_dir(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "events" / "cursors" -def append_outbox_event(workspace_root: Path, payload: dict[str, object]) -> dict[str, object] | None: - outbox_path = _outbox_file(workspace_root) - lock_path = _outbox_lock_file(workspace_root) - outbox_path.parent.mkdir(parents=True, exist_ok=True) - lock_path.parent.mkdir(parents=True, exist_ok=True) +def _current_seq(outbox: Path) -> int: + if not outbox.exists(): + return 0 try: - with lock_path.open("a+", encoding="utf-8") as lock_fh: - fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX) - seq = 1 - if outbox_path.exists(): - with outbox_path.open("r", encoding="utf-8") as existing: - for line in existing: - line = line.strip() - if not line: - continue - try: - row = json.loads(line) - except json.JSONDecodeError: - continue - value = int(row.get("seq", 0)) 
- if value >= seq: - seq = value + 1 - event = { - "version": 1, - "seq": seq, - "event_id": os.urandom(8).hex(), - "timestamp": _now_utc(), - **payload, - } - with outbox_path.open("a", encoding="utf-8") as fh: - fh.write(json.dumps(event) + "\n") - fcntl.flock(lock_fh.fileno(), fcntl.LOCK_UN) - return event + text = outbox.read_text() except OSError: - return None + return 0 + last_seq = 0 + for line in text.strip().split("\n"): + line = line.strip() + if not line: + continue + try: + obj = json.loads(line) + if isinstance(obj, dict) and "seq" in obj: + last_seq = max(last_seq, obj["seq"]) + except (json.JSONDecodeError, TypeError): + continue + return last_seq + + +def _maybe_rotate(outbox: Path) -> None: + if not outbox.exists(): + return + try: + size = outbox.stat().st_size + except OSError: + return + if size <= _ROTATION_THRESHOLD: + return + ts = datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%S") + archive = outbox.parent / f"outbox.{ts}.jsonl" + outbox.rename(archive) + + +def emit( + event_type: EventType, + workspace_root: Path, + actor: str, + owner_unit: str, + payload: dict[str, object], + *, + agent_id: str | None = None, +) -> None: + collisions = _RESERVED_NAMES & payload.keys() + if collisions: + raise ValueError(f"payload keys collide with reserved envelope/context names: {collisions}") + + try: + outbox = _outbox_path(workspace_root) + outbox.parent.mkdir(parents=True, exist_ok=True) + + seq = _current_seq(outbox) + 1 + _maybe_rotate(outbox) + + event: dict[str, object] = { + "version": 1, + "event_id": os.urandom(8).hex(), + "seq": seq, + "timestamp": datetime.now(timezone.utc).isoformat(), + "type": str(event_type.value), + "workspace": workspace_root.name, + "actor": actor, + "owner_unit": owner_unit, + } + if agent_id is not None: + event["agent_id"] = agent_id + event.update(payload) + + with outbox.open("a") as f: + f.write(json.dumps(event, separators=(",", ":")) + "\n") + f.flush() + + except Exception as exc: + print(f"gr2: event 
emit failed: {exc}", file=sys.stderr) + + +def read_events(workspace_root: Path, consumer: str) -> list[dict[str, object]]: + outbox = _outbox_path(workspace_root) + if not outbox.exists(): + return [] + + cursor = _load_cursor(workspace_root, consumer) + last_seq = cursor.get("last_seq", 0) + + events: list[dict[str, object]] = [] + text = outbox.read_text() + for line in text.strip().split("\n"): + line = line.strip() + if not line: + continue + try: + obj = json.loads(line) + except json.JSONDecodeError: + continue + if not isinstance(obj, dict): + continue + if obj.get("seq", 0) <= last_seq: + continue + events.append(obj) + + if events: + last_event = events[-1] + _save_cursor( + workspace_root, + consumer, + { + "consumer": consumer, + "last_seq": last_event["seq"], + "last_event_id": last_event.get("event_id", ""), + "last_read": datetime.now(timezone.utc).isoformat(), + }, + ) + + return events + + +def _load_cursor(workspace_root: Path, consumer: str) -> dict[str, object]: + cursor_file = _cursors_dir(workspace_root) / f"{consumer}.json" + if not cursor_file.exists(): + return {} + try: + return json.loads(cursor_file.read_text()) + except (json.JSONDecodeError, OSError): + return {} + + +def _save_cursor(workspace_root: Path, consumer: str, data: dict[str, object]) -> None: + cursors = _cursors_dir(workspace_root) + cursors.mkdir(parents=True, exist_ok=True) + cursor_file = cursors / f"{consumer}.json" + tmp = cursor_file.with_suffix(".tmp") + tmp.write_text(json.dumps(data, indent=2)) + tmp.rename(cursor_file) diff --git a/gr2/python_cli/failures.py b/gr2/python_cli/failures.py index c681774..2b0726a 100644 --- a/gr2/python_cli/failures.py +++ b/gr2/python_cli/failures.py @@ -5,7 +5,7 @@ from datetime import UTC, datetime from pathlib import Path -from .events import append_outbox_event +from .events import EventType, emit def _now_utc() -> str: @@ -78,13 +78,12 @@ def resolve_failure_marker( raise SystemExit(f"failure marker not found: {operation_id}") 
marker = json.loads(path.read_text()) path.unlink() - event = append_outbox_event( - workspace_root, - { - "type": "failure.resolved", - "workspace": workspace_root.name, - "actor": resolved_by, - "owner_unit": owner_unit, + emit( + event_type=EventType.FAILURE_RESOLVED, + workspace_root=workspace_root, + actor=resolved_by, + owner_unit=owner_unit, + payload={ "operation_id": operation_id, "resolved_by": resolved_by, "resolution": resolution, @@ -96,5 +95,4 @@ def resolve_failure_marker( "resolved_by": resolved_by, "resolution": resolution, "lane_name": marker.get("lane_name", ""), - "event_id": None if event is None else event["event_id"], } diff --git a/gr2/python_cli/pr.py b/gr2/python_cli/pr.py index 3ccd113..bbd6347 100644 --- a/gr2/python_cli/pr.py +++ b/gr2/python_cli/pr.py @@ -16,7 +16,7 @@ import os from pathlib import Path -from .events import emit, EventType +from .events import EventType, emit from .platform import AdapterError, CreatePRRequest, PlatformAdapter @@ -43,11 +43,12 @@ def _load_group(workspace_root: Path, pr_group_id: str) -> dict: return json.loads(path.read_text()) -def _save_group(workspace_root: Path, group: dict) -> None: +def _save_group(workspace_root: Path, group: dict) -> Path: d = _pr_groups_dir(workspace_root) d.mkdir(parents=True, exist_ok=True) path = d / f"{group['pr_group_id']}.json" path.write_text(json.dumps(group, indent=2)) + return path def create_pr_group( @@ -78,35 +79,30 @@ def create_pr_group( draft=draft, ) ref = adapter.create_pr(request) - prs.append({ - "repo": repo, - "pr_number": ref.number, - "url": ref.url, - }) + prs.append({"repo": repo, "pr_number": ref.number, "url": ref.url}) group = { "pr_group_id": pr_group_id, + "owner_unit": owner_unit, "lane_name": lane_name, "title": title, "base_branch": base_branch, "head_branch": head_branch, + "platform": getattr(adapter, "name", "github"), "prs": prs, "status": {repo: "OPEN" for repo in repos}, } - _save_group(workspace_root, group) + path = 
_save_group(workspace_root, group) emit( event_type=EventType.PR_CREATED, workspace_root=workspace_root, actor=actor, owner_unit=owner_unit, - payload={ - "pr_group_id": pr_group_id, - "lane_name": lane_name, - "repos": prs, - }, + payload={"pr_group_id": pr_group_id, "lane_name": lane_name, "repos": prs}, ) + group["state_path"] = str(path) return group @@ -146,10 +142,7 @@ def merge_pr_group( workspace_root=workspace_root, actor=actor, owner_unit=group.get("owner_unit", actor), - payload={ - "pr_group_id": pr_group_id, - "repos": merged, - }, + payload={"pr_group_id": pr_group_id, "repos": merged}, ) return group @@ -171,7 +164,6 @@ def check_pr_group_status( status = adapter.pr_status(repo, number) old_state = cached_status.get(repo, "OPEN") - # Detect state change (OPEN -> MERGED, OPEN -> CLOSED, etc.) if status.state != old_state: emit( event_type=EventType.PR_STATUS_CHANGED, @@ -188,7 +180,6 @@ def check_pr_group_status( ) cached_status[repo] = status.state - # Detect check results (only when checks are complete) if status.checks: completed = [c for c in status.checks if c.status == "COMPLETED"] if completed and len(completed) == len(status.checks): @@ -234,11 +225,7 @@ def record_pr_review( state: str, actor: str, ) -> None: - """Record an externally-submitted PR review and emit pr.review_submitted. - - Reviews come from outside gr2 (GitHub webhooks, human action, etc.). - The adapter doesn't query reviews, so this is a push-model entry point. 
- """ + """Record an externally-submitted PR review and emit pr.review_submitted.""" emit( event_type=EventType.PR_REVIEW_SUBMITTED, workspace_root=workspace_root, diff --git a/gr2/python_cli/spec_apply.py b/gr2/python_cli/spec_apply.py index 593bd4d..3715a4f 100644 --- a/gr2/python_cli/spec_apply.py +++ b/gr2/python_cli/spec_apply.py @@ -7,7 +7,7 @@ from datetime import UTC, datetime from pathlib import Path -from .events import append_outbox_event +from .events import EventType, emit from .gitops import clone_repo, ensure_repo_cache, is_git_dir, is_git_repo, repo_dirty from .hooks import HookContext, apply_file_projections, load_repo_hooks, run_lifecycle_stage @@ -281,13 +281,12 @@ def apply_plan(workspace_root: Path, *, yes: bool, manual_hooks: bool = False) - manual_hooks=manual_hooks, ) for projection in hook_payload["projected_files"]: - append_outbox_event( - workspace_root, - { - "type": "workspace.file_projected", - "workspace": workspace_root.name, - "actor": "system", - "owner_unit": "workspace", + emit( + event_type=EventType.WORKSPACE_FILE_PROJECTED, + workspace_root=workspace_root, + actor="system", + owner_unit="workspace", + payload={ "repo": str(repo_spec["name"]), "kind": projection["kind"], "src": projection["src"], @@ -321,15 +320,12 @@ def apply_plan(workspace_root: Path, *, yes: bool, manual_hooks: bool = False) - if applied: _record_apply_state(workspace_root, applied) if materialized_repos: - append_outbox_event( - workspace_root, - { - "type": "workspace.materialized", - "workspace": workspace_root.name, - "actor": "system", - "owner_unit": "workspace", - "repos": materialized_repos, - }, + emit( + event_type=EventType.WORKSPACE_MATERIALIZED, + workspace_root=workspace_root, + actor="system", + owner_unit="workspace", + payload={"repos": materialized_repos}, ) return { diff --git a/gr2/python_cli/syncops.py b/gr2/python_cli/syncops.py index 55815a0..6c62acb 100644 --- a/gr2/python_cli/syncops.py +++ b/gr2/python_cli/syncops.py @@ -24,7 
+24,7 @@ repo_dirty, stash_if_dirty, ) -from .events import append_outbox_event +from .events import EventType, emit from .hooks import load_repo_hooks from .spec_apply import ( ValidationIssue, @@ -204,12 +204,18 @@ def _sync_lock_file(workspace_root: Path) -> Path: return workspace_root / ".grip" / "state" / "sync.lock" -def _append_outbox_event(workspace_root: Path, payload: dict[str, object]) -> None: - append_outbox_event(workspace_root, payload) - - def _emit_sync_event(workspace_root: Path, payload: dict[str, object]) -> None: - _append_outbox_event(workspace_root, payload) + event_type = EventType(str(payload.pop("type"))) + payload.pop("workspace", None) + agent_id = payload.pop("agent_id", None) + emit( + event_type=event_type, + workspace_root=workspace_root, + actor=str(payload.pop("actor")), + owner_unit=str(payload.pop("owner_unit")), + payload=payload, + agent_id=None if agent_id is None else str(agent_id), + ) def _sync_context(workspace_root: Path, *, actor: str = "system", owner_unit: str = "workspace") -> dict[str, object]: diff --git a/gr2/tests/test_sprint21_sync_platform.py b/gr2/tests/test_sprint21_sync_platform.py index bf81421..70050a8 100644 --- a/gr2/tests/test_sprint21_sync_platform.py +++ b/gr2/tests/test_sprint21_sync_platform.py @@ -595,12 +595,12 @@ def pr_checks(self, repo: str, number: int) -> list[PRCheck]: # pragma: no cove assert result.exit_code == 0 payload = json.loads(result.stdout) assert payload["pr_group_id"].startswith("pg_") - assert len(payload["refs"]) == 2 + assert len(payload["prs"]) == 2 group_path = workspace_root / ".grip" / "pr_groups" / f'{payload["pr_group_id"]}.json' assert group_path.exists(), "group state should be stored by pr_group_id, not lane name" stored = json.loads(group_path.read_text()) - assert {item["repo"]: item["number"] for item in stored["refs"]} == {"app": 41, "api": 42} + assert {item["repo"]: item["pr_number"] for item in stored["prs"]} == {"app": 41, "api": 42} def 
test_pr_status_aggregates_group_state(tmp_path: Path, monkeypatch) -> None: @@ -621,10 +621,11 @@ def test_pr_status_aggregates_group_state(tmp_path: Path, monkeypatch) -> None: "owner_unit": "atlas", "lane_name": "feat-router", "platform": "github", - "refs": [ - {"repo": "app", "number": 41, "url": "https://example.test/app/41"}, - {"repo": "api", "number": 42, "url": "https://example.test/api/42"}, + "prs": [ + {"repo": "app", "pr_number": 41, "url": "https://example.test/app/41"}, + {"repo": "api", "pr_number": 42, "url": "https://example.test/api/42"}, ], + "status": {"app": "OPEN", "api": "OPEN"}, } ) ) @@ -676,10 +677,11 @@ def test_pr_merge_reports_partial_failure_and_preserves_state(tmp_path: Path, mo "owner_unit": "atlas", "lane_name": "feat-router", "platform": "github", - "refs": [ - {"repo": "app", "number": 41, "url": "https://example.test/app/41"}, - {"repo": "api", "number": 42, "url": "https://example.test/api/42"}, + "prs": [ + {"repo": "app", "pr_number": 41, "url": "https://example.test/app/41"}, + {"repo": "api", "pr_number": 42, "url": "https://example.test/api/42"}, ], + "status": {"app": "OPEN", "api": "OPEN"}, } ) ) @@ -692,7 +694,9 @@ def create_pr(self, request: CreatePRRequest) -> PRRef: # pragma: no cover def merge_pr(self, repo: str, number: int) -> PRRef: if repo == "api": - raise RuntimeError("merge conflict") + from gr2.python_cli.platform import AdapterError + + raise AdapterError("merge conflict") return PRRef(repo=repo, number=number, url=f"https://example.test/{repo}/{number}") def pr_status(self, repo: str, number: int) -> PRStatus: # pragma: no cover