diff --git a/gr2/docs/ASSESS-SYNC-ADVERSARIAL-SPECS.md b/gr2/docs/ASSESS-SYNC-ADVERSARIAL-SPECS.md new file mode 100644 index 0000000..e6d8f71 --- /dev/null +++ b/gr2/docs/ASSESS-SYNC-ADVERSARIAL-SPECS.md @@ -0,0 +1,178 @@ +# Assess Sync Adversarial Specs + +Artifact 2 for the Sprint 20 sync lane. + +This document lists the failure-first specs the Python `gr2 sync` implementation +must satisfy before `sync run` is allowed to mutate workspace state. + +## 1. Missing Spec + +Preconditions: +- workspace has no `.grip/workspace_spec.toml` + +Trigger: +- `gr2 sync status ` + +Expected: +- command fails immediately +- error points to `gr2 workspace init` +- no cache, repo, lane, or event state is written + +## 2. Partial Clone Failure + +Preconditions: +- spec declares 3 repos +- repo A and B are reachable +- repo C remote is invalid or unavailable + +Trigger: +- `gr2 sync run` + +Expected: +- planner marks A/B runnable and C failing before execution starts +- execution stops on C if C is in the same phase batch +- result reports: + - A/B success or skipped state explicitly + - C as failure with repo-scoped error payload +- no successful repo update is silently rolled back +- event outbox records partial progress and terminal failure + +Invariant: +- sync never reports all-green on partial workspace failure + +## 3. Dirty Shared Repo + +Preconditions: +- shared repo checkout exists +- uncommitted changes in repo root + +Trigger: +- `gr2 sync status` + +Expected: +- issue `dirty_shared_repo` +- issue blocks sync +- planner does not schedule branch movement or fetch-dependent mutation through + the dirty checkout + +Invariant: +- dirty state wins over convenience + +## 4. 
Dirty Lane Checkout During Sync + +Preconditions: +- lane checkout exists +- lane repo has uncommitted changes + +Trigger: +- `gr2 sync status` + +Expected: +- issue `dirty_lane_repo` +- issue blocks sync +- planner may still inspect other repos, but lane mutation is blocked + +Invariant: +- lane-local work is never overwritten by workspace sync + +## 5. Conflicting Branch States Across Repos + +Preconditions: +- lane spans repos `app`, `api`, `premium` +- expected branch is `feat/auth` +- `app` is on `feat/auth` +- `api` is behind remote +- `premium` is on a different local branch + +Trigger: +- `gr2 sync status` + +Expected: +- planner reports repo-scoped branch inspection operations +- branch divergence appears as explicit sync issue, not implicit correction +- no automatic branch checkout/rebase in status mode + +Invariant: +- branch alignment must be explicit before mutation + +## 6. Shared Cache Path Conflict + +Preconditions: +- `.grip/cache/repos/.git` exists +- path is not a bare git directory + +Trigger: +- `gr2 sync status` + +Expected: +- issue `cache_path_conflict` +- sync blocks +- planner does not attempt to reuse or overwrite the invalid cache path + +## 7. Invalid Repo Hook Config + +Preconditions: +- shared repo has `.gr2/hooks.toml` +- file does not parse or violates schema + +Trigger: +- `gr2 sync status` + +Expected: +- spec validation fails before sync planning proceeds +- sync status returns blocked with the hook validation error included + +Invariant: +- repo hook errors fail fast at plan time + +## 8. Sync During Active Edit Lease + +Preconditions: +- lane has an active `edit` lease +- lane repo is otherwise clean + +Trigger: +- `gr2 sync run --lane ` + +Expected: +- sync refuses lane mutation for the leased lane +- non-lane workspace inspection may still succeed +- result clearly distinguishes lease-blocked lanes from unrelated workspace + status + +Invariant: +- sync does not tunnel through active edit occupancy + +## 9. 
Concurrent Sync From Two Worktrees + +Preconditions: +- same workspace available from two operator shells +- both invoke sync against overlapping repos + +Trigger: +- `gr2 sync run` concurrently + +Expected: +- shared mutable resources use explicit lock discipline +- losing side returns machine-readable contention error +- no cache corruption, no partially-written apply metadata + +Invariant: +- concurrency failure is reported, not hidden as random repo damage + +## 10. Platform Backend Failure + +Preconditions: +- `PlatformAdapter` backend is GitHub via `gh` +- `gh` auth is invalid or the command times out + +Trigger: +- sync planner tries to refresh PR/check state + +Expected: +- repo/local sync inspection still reports local status +- platform-dependent operations are marked degraded or failed +- failure is explicit in the result payload + +Invariant: +- adapter failure must not masquerade as clean workspace state diff --git a/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md b/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md new file mode 100644 index 0000000..578fd9c --- /dev/null +++ b/gr2/docs/PLATFORM-ADAPTER-AND-SYNC.md @@ -0,0 +1,241 @@ +# Platform Adapter And Sync + +Sprint 20 design lane for: + +- `PlatformAdapter` protocol +- GitHub-only shipping backend for `gr2 2.0` +- sync algorithm for cross-repo orchestration + +Required companion artifacts for this design: + +- adversarial failing specs: + [ASSESS-SYNC-ADVERSARIAL-SPECS.md](./ASSESS-SYNC-ADVERSARIAL-SPECS.md) +- failure/rollback contract: + [SYNC-FAILURE-CONTRACT.md](./SYNC-FAILURE-CONTRACT.md) + +## 1. Scope + +`gr2` owns cross-repo orchestration in OSS: + +- workspace spec +- materialization +- sync +- lanes +- aggregated status +- PR orchestration + +Single-repo git remains raw git. + +Platform integration is intentionally narrow: + +- ship GitHub only first +- hide platform details behind a protocol +- let future GitLab / Azure / Bitbucket adapters arrive later without changing `gr2` UX + +## 2. 
Adapter Contract + +`gr2/python_cli/platform.py` defines the protocol: + +- `create_pr` +- `merge_pr` +- `pr_status` +- `list_prs` +- `pr_checks` + +The CLI consumes the protocol only. It does not talk to GitHub directly. + +### Shipping backend + +The first backend is `GitHubAdapter`, implemented on top of `gh` CLI. + +Reasoning: + +- simplest path to production +- no custom API client to maintain +- reuses existing authenticated operator environment +- keeps platform logic thin while we prove the orchestration UX + +### Future plugin path + +The adapter boundary is intentionally protocol-shaped, not GitHub-shaped. + +That makes third-party adapters possible later: + +- config-based adapter selection +- module import / entry-point registration +- same `gr2` PR commands, different backend implementation + +## 3. Required Spawn-Readiness Seams + +For premium spawn to move on top of `gr2`, these are required: + +- hook invocation API with stable structured results +- workspace / lane event outbox +- leases and lane metadata +- `exec status` and `exec run` +- machine-readable failure surfaces + +These are not optional polish. They are spawn prerequisites. + +## 4. Sync Goals + +`sync` is the missing orchestration surface between: + +- spec/plan/apply +- lane state +- repo caches +- review/PR flow + +`sync` must be: + +- safe with dirty state +- lane-aware +- explicit about what it mutates +- resumable after partial failure +- explicit about lease-blocked lanes + +## 5. 
Sync Phases + +### Phase A: Inspect + +Read: + +- workspace spec +- shared repo cache state +- shared repo checkout state +- lane metadata +- lease state +- hook configs + +Emit a workspace-level snapshot: + +- missing repos +- stale caches +- dirty repos +- lane checkouts missing +- lane branches behind remote +- hook config errors + +### Phase B: Plan + +Build a sync plan with explicit operations: + +- refresh repo cache +- fast-forward shared repo +- materialize missing repo +- refresh lane branch +- block on dirty state +- block on conflicting lease +- surface manual action required + +No mutation yet. + +### Phase C: Execute + +Apply only safe operations by default: + +- fetch/update cache +- clone missing repo +- materialize missing lane checkout +- fast-forward clean branches + +Unsafe operations must block unless explicitly requested: + +- dirty shared repo +- dirty lane checkout +- branch divergence requiring merge/rebase +- hook failure with `on_failure = block` + +### Phase D: Emit + +Write: + +- structured sync result +- event outbox entries +- updated aggregated status snapshot + +This is the seam premium and QA will consume. + +## 6. Sync Safety Rules + +1. Dirty state is explicit, not implicit. + `sync` accepts `--dirty=stash|block|discard`. + Default is `stash`, per Sprint 20 ruling. + +2. Lanes are first-class. + `sync` must treat shared repos and lane checkouts differently. + +3. Shared repo cache is substrate, not UX. + Mutations there should be invisible unless they affect user work. + +4. Partial failure must be reportable. + Example: 3 of 5 repos updated, 1 blocked dirty, 1 platform failure. + +5. Event emission is part of correctness. + `sync` must emit enough machine-readable state for premium spawn and QA. + Emit failure does not block the parent operation. + +6. Terminal sync state is normalized. + `sync.completed` is the terminal event for success, blocked, failed, and + partial-failure outcomes. 
Intermediate contention may still emit + `sync.conflict`. + +## 7. Proposed Command Shapes + +Initial surfaces: + +- `gr2 sync status` +- `gr2 sync run` + +Possible later flags: + +- `--lane ` +- `--owner-unit ` +- `--refresh-prs` +- `--dirty=stash|block|discard` +- `--json` + +`sync status` should be the dry-run/default read path. + +`sync run` should consume the same planner output and execute allowed operations. + +## 8. Failure Scenarios The QA Arena Must Cover + +- dirty shared repo during sync +- dirty lane checkout during sync +- lane branch behind remote +- lane branch diverged from remote +- `gh` timeout during PR create/status +- partial repo refresh failure +- hook failure during sync-triggered materialization +- concurrent sync from two worktrees +- sync during active edit lease + +These are required Sprint 20 QA inputs, not later hardening. + +## 9. Implementation Ordering + +I agree with Layne's platform-first ordering, with one constraint: + +1. `PlatformAdapter` protocol + `GitHubAdapter` +2. sync algorithm design with event outbox requirements folded in +3. aggregated status +4. PR create/status/merge on the adapter +5. lane switch/list polish + +Rationale: + +- PR lifecycle should not be implemented before the adapter boundary exists +- sync and aggregated status share most of the same inspection model +- event outbox requirements need to be considered while designing sync, not bolted on later + +## 10. Non-Goals + +Not part of Sprint 20 `gr2` OSS: + +- single-repo git porcelain +- spawn/agent orchestration +- release flow +- multi-platform support beyond GitHub + +Those would either duplicate raw git or blur the OSS/premium boundary. diff --git a/gr2/docs/SYNC-FAILURE-CONTRACT.md b/gr2/docs/SYNC-FAILURE-CONTRACT.md new file mode 100644 index 0000000..eefc569 --- /dev/null +++ b/gr2/docs/SYNC-FAILURE-CONTRACT.md @@ -0,0 +1,165 @@ +# Sync Failure Contract + +Artifact 3 for the Sprint 20 sync lane. 
+ +This contract defines what Python `gr2 sync` is allowed to do on failure, what +it must report, and what it must never attempt to hide. + +## 1. Core Rule + +`sync status` is read-only. + +`sync run` may mutate workspace state, but it must never pretend a partial +failure is a rollback-complete success. + +## 2. Mutation Model + +`sync` operates in ordered phases: + +1. inspect +2. plan +3. execute +4. emit result + outbox events + +Within a phase, successful mutations are durable unless the operation itself has +an explicit local rollback mechanism. + +Examples: +- a completed `git fetch` is durable +- a completed cache refresh is durable +- a completed clone is durable +- a completed branch checkout is durable + +These are not automatically rolled back just because a later repo fails. + +## 3. Default Failure Behavior + +On the first blocking failure in `sync run`: + +- stop scheduling new mutating operations in the current batch +- preserve already-completed successful operations +- report all completed work explicitly +- report the blocking failure explicitly +- write an event/outbox record describing the partial state + +The contract is: +- stop +- preserve +- report + +Not: +- guess +- continue blindly +- fabricate rollback + +## 4. Dirty State + +Dirty handling is explicit through `--dirty=stash|block|discard`. + +Default: +- `--dirty=stash` + +Behavior: +- `stash`: preserve local work by stashing it before sync mutation proceeds +- `block`: return a blocking dirty-state issue and do not mutate through that + checkout +- `discard`: explicitly discard local changes before sync mutation proceeds + +Rules: +- no implicit commit +- no dirty-state behavior outside the declared `--dirty` mode +- `discard` is always explicit and never the default + +## 5. 
Partial State Contract + +If `sync run` partially succeeds: + +- result status is `partial_failure` +- result contains: + - completed operations + - blocked operations + - failed operations + - unaffected operations, if known +- event outbox must include: + - `sync.started` + - one event per completed mutation + - terminal `sync.completed` with a `status` field describing the outcome + +Consumers must be able to reconstruct: +- what changed +- what did not change +- what needs human or agent follow-up + +## 6. Rollback Rules + +Default rule: +- no automatic workspace-wide rollback + +Reason: +- cross-repo rollback is not reliably safe +- later repos may fail after earlier repos perform valid, independent updates +- forcing rollback would risk clobbering legitimate state + +Allowed rollback only when all of the following are true: +- rollback scope is local to one operation +- rollback is deterministic +- rollback result can be verified immediately +- rollback failure is itself reportable + +Examples of acceptable local rollback candidates later: +- removing a just-created empty metadata file +- deleting a just-created lane marker that has no downstream references yet + +Examples not allowed by default: +- resetting git refs across multiple repos +- auto-restoring stashes across partially-mutated lane trees +- deleting refreshed caches because a later repo failed + +## 7. Error Reporting Contract + +Every blocking failure must carry: +- `code` +- `scope` +- `subject` +- human-readable `message` +- machine-readable `details` when available + +Every sync result must distinguish: +- `blocked` from policy/safety preconditions +- `failed` from runtime execution errors +- `partial_failure` from all-or-nothing failure + +## 8. 
Lease and Occupancy Contract + +If sync encounters an active conflicting lease: +- it is a blocker, not a warning +- sync does not override or steal the lease +- result points to the owning actor and lease mode when available +- `sync.conflict` is emitted with the blocking lease metadata +- terminal state still arrives through `sync.completed` with `status = "blocked"` + +If a stale lease policy is added later, it must be explicit and separately +authorized. It is not part of the default sync contract. + +## 9. Platform Adapter Failure Contract + +If the `PlatformAdapter` backend fails: +- local repo and lane inspection still completes when possible +- platform-derived fields are marked degraded/failed +- sync status must not silently omit missing platform data + +GitHub via `gh` is treated as an external dependency: +- failures are surfaced +- not normalized away + +## 10. Operator Expectations + +When `sync` fails, the operator should be able to answer: + +1. what changed? +2. what did not change? +3. what blocked the next step? +4. what is safe to retry? + +If the result payload cannot answer those four questions, the sync surface is +not ready for production mutation. diff --git a/gr2/python_cli/app.py b/gr2/python_cli/app.py index 2876343..be29f21 100644 --- a/gr2/python_cli/app.py +++ b/gr2/python_cli/app.py @@ -11,6 +11,7 @@ from . import execops from . import migration +from . 
import syncops from .gitops import ( branch_exists, checkout_branch, @@ -38,6 +39,7 @@ workspace_app = typer.Typer(help="Workspace bootstrap and materialization") spec_app = typer.Typer(help="Declarative workspace spec operations") exec_app = typer.Typer(help="Lane-aware execution planning and execution") +sync_app = typer.Typer(help="Workspace-wide sync inspection and execution") app.add_typer(repo_app, name="repo") app.add_typer(lane_app, name="lane") @@ -46,6 +48,7 @@ app.add_typer(workspace_app, name="workspace") app.add_typer(spec_app, name="spec") app.add_typer(exec_app, name="exec") +app.add_typer(sync_app, name="sync") def _workspace_repo_spec(workspace_root: Path, repo_name: str) -> dict[str, object]: @@ -249,6 +252,38 @@ def _exit(code: int) -> None: raise typer.Exit(code=code) +@sync_app.command("status") +def sync_status( + workspace_root: Path, + dirty_mode: str = typer.Option("stash", "--dirty", help="Dirty-state handling: stash, block, or discard"), + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Inspect workspace-wide sync readiness without mutating any repo state.""" + workspace_root = workspace_root.resolve() + plan = syncops.build_sync_plan(workspace_root, dirty_mode=dirty_mode) + if json_output: + typer.echo(json.dumps(plan.as_dict(), indent=2)) + return + typer.echo(syncops.render_sync_plan(plan)) + + +@sync_app.command("run") +def sync_run( + workspace_root: Path, + dirty_mode: str = typer.Option("stash", "--dirty", help="Dirty-state handling: stash, block, or discard"), + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Execute the current sync plan, stopping on the first blocking runtime failure.""" + workspace_root = workspace_root.resolve() + result = syncops.run_sync(workspace_root, dirty_mode=dirty_mode) + if json_output: + typer.echo(json.dumps(result.as_dict(), indent=2)) + else: + 
typer.echo(syncops.render_sync_result(result)) + if result.status in {"blocked", "failed", "partial_failure"}: + raise typer.Exit(code=1) + + @workspace_app.command("init") def workspace_init( workspace_root: Path, diff --git a/gr2/python_cli/gitops.py b/gr2/python_cli/gitops.py index 8fca724..8b10d9b 100644 --- a/gr2/python_cli/gitops.py +++ b/gr2/python_cli/gitops.py @@ -32,6 +32,14 @@ def remote_origin_url(path: Path) -> str | None: return value or None +def current_head_sha(path: Path) -> str | None: + proc = git(path, "rev-parse", "HEAD") + if proc.returncode != 0: + return None + value = proc.stdout.strip() + return value or None + + def ensure_repo_cache(url: str, cache_repo_root: Path) -> bool: """Ensure a local bare mirror exists for a repo URL. @@ -145,6 +153,13 @@ def checkout_branch(repo_root: Path, branch: str) -> None: raise SystemExit(f"failed to checkout {branch} in {repo_root}:\n{proc.stderr or proc.stdout}") +def current_branch(repo_root: Path) -> str: + proc = git(repo_root, "branch", "--show-current") + if proc.returncode != 0: + raise SystemExit(f"failed to determine current branch in {repo_root}:\n{proc.stderr or proc.stdout}") + return proc.stdout.strip() + + def stash_if_dirty(repo_root: Path, message: str) -> bool: if not repo_dirty(repo_root): return False @@ -152,3 +167,15 @@ def stash_if_dirty(repo_root: Path, message: str) -> bool: if proc.returncode != 0: raise SystemExit(f"failed to stash dirty work in {repo_root}:\n{proc.stderr or proc.stdout}") return True + + +def discard_if_dirty(repo_root: Path) -> bool: + if not repo_dirty(repo_root): + return False + proc = git(repo_root, "reset", "--hard", "HEAD") + if proc.returncode != 0: + raise SystemExit(f"failed to discard tracked changes in {repo_root}:\n{proc.stderr or proc.stdout}") + proc = git(repo_root, "clean", "-fd") + if proc.returncode != 0: + raise SystemExit(f"failed to discard untracked changes in {repo_root}:\n{proc.stderr or proc.stdout}") + return True diff --git 
a/gr2/python_cli/platform.py b/gr2/python_cli/platform.py new file mode 100644 index 0000000..ee18a7f --- /dev/null +++ b/gr2/python_cli/platform.py @@ -0,0 +1,234 @@ +from __future__ import annotations + +import json +import shutil +import subprocess +from dataclasses import asdict, dataclass, field +from pathlib import Path +from typing import Protocol + + +@dataclass(frozen=True) +class PRRef: + repo: str + number: int | None = None + url: str | None = None + head_branch: str | None = None + base_branch: str | None = None + title: str | None = None + + def as_dict(self) -> dict[str, object]: + return asdict(self) + + +@dataclass(frozen=True) +class PRCheck: + name: str + status: str + conclusion: str | None = None + details_url: str | None = None + + def as_dict(self) -> dict[str, object]: + return asdict(self) + + +@dataclass(frozen=True) +class PRStatus: + ref: PRRef + state: str + mergeable: str | None = None + checks: list[PRCheck] = field(default_factory=list) + + def as_dict(self) -> dict[str, object]: + return { + "ref": self.ref.as_dict(), + "state": self.state, + "mergeable": self.mergeable, + "checks": [item.as_dict() for item in self.checks], + } + + +@dataclass(frozen=True) +class CreatePRRequest: + repo: str + title: str + body: str + head_branch: str + base_branch: str + draft: bool = False + + +class PlatformAdapter(Protocol): + """Protocol for platform-backed PR orchestration. + + gr2 owns the orchestration UX. Adapters hide the hosting platform backend. + """ + + name: str + + def create_pr(self, request: CreatePRRequest) -> PRRef: ... + + def merge_pr(self, repo: str, number: int) -> PRRef: ... + + def pr_status(self, repo: str, number: int) -> PRStatus: ... + + def list_prs(self, repo: str, *, head_branch: str | None = None) -> list[PRRef]: ... + + def pr_checks(self, repo: str, number: int) -> list[PRCheck]: ... 
+ + +class AdapterError(RuntimeError): + pass + + +def _run_json(command: list[str], *, cwd: Path | None = None) -> object: + proc = subprocess.run( + command, + cwd=cwd, + capture_output=True, + text=True, + check=False, + ) + if proc.returncode != 0: + raise AdapterError(proc.stderr.strip() or proc.stdout.strip() or f"command failed: {' '.join(command)}") + try: + return json.loads(proc.stdout) + except json.JSONDecodeError as exc: + raise AdapterError(f"command did not return valid json: {' '.join(command)}") from exc + + +class GitHubAdapter: + name = "github" + + def __init__(self, gh_binary: str = "gh") -> None: + if shutil.which(gh_binary) is None: + raise AdapterError(f"`{gh_binary}` not found in PATH") + self.gh_binary = gh_binary + + def create_pr(self, request: CreatePRRequest) -> PRRef: + cmd = [ + self.gh_binary, + "pr", + "create", + "--repo", + request.repo, + "--title", + request.title, + "--body", + request.body, + "--head", + request.head_branch, + "--base", + request.base_branch, + ] + if request.draft: + cmd.append("--draft") + proc = subprocess.run(cmd, capture_output=True, text=True, check=False) + if proc.returncode != 0: + raise AdapterError(proc.stderr.strip() or proc.stdout.strip() or "gh pr create failed") + url = proc.stdout.strip() + return PRRef( + repo=request.repo, + url=url or None, + head_branch=request.head_branch, + base_branch=request.base_branch, + title=request.title, + ) + + def merge_pr(self, repo: str, number: int) -> PRRef: + proc = subprocess.run( + [self.gh_binary, "pr", "merge", str(number), "--repo", repo], + capture_output=True, + text=True, + check=False, + ) + if proc.returncode != 0: + raise AdapterError(proc.stderr.strip() or proc.stdout.strip() or "gh pr merge failed") + return PRRef(repo=repo, number=number) + + def pr_status(self, repo: str, number: int) -> PRStatus: + payload = _run_json( + [ + self.gh_binary, + "pr", + "view", + str(number), + "--repo", + repo, + "--json", + 
"number,url,headRefName,baseRefName,title,state,mergeable,statusCheckRollup", + ] + ) + assert isinstance(payload, dict) + checks = self._parse_checks(payload.get("statusCheckRollup") or []) + ref = PRRef( + repo=repo, + number=payload.get("number"), + url=payload.get("url"), + head_branch=payload.get("headRefName"), + base_branch=payload.get("baseRefName"), + title=payload.get("title"), + ) + return PRStatus( + ref=ref, + state=str(payload.get("state", "UNKNOWN")), + mergeable=str(payload.get("mergeable")) if payload.get("mergeable") is not None else None, + checks=checks, + ) + + def list_prs(self, repo: str, *, head_branch: str | None = None) -> list[PRRef]: + payload = _run_json( + [ + self.gh_binary, + "pr", + "list", + "--repo", + repo, + "--json", + "number,url,headRefName,baseRefName,title", + ] + ) + assert isinstance(payload, list) + refs: list[PRRef] = [] + for item in payload: + if not isinstance(item, dict): + continue + if head_branch and item.get("headRefName") != head_branch: + continue + refs.append( + PRRef( + repo=repo, + number=item.get("number"), + url=item.get("url"), + head_branch=item.get("headRefName"), + base_branch=item.get("baseRefName"), + title=item.get("title"), + ) + ) + return refs + + def pr_checks(self, repo: str, number: int) -> list[PRCheck]: + return self.pr_status(repo, number).checks + + @staticmethod + def _parse_checks(rows: list[object]) -> list[PRCheck]: + checks: list[PRCheck] = [] + for row in rows: + if not isinstance(row, dict): + continue + checks.append( + PRCheck( + name=str(row.get("name", "unknown")), + status=str(row.get("status", "UNKNOWN")), + conclusion=(str(row["conclusion"]) if row.get("conclusion") is not None else None), + details_url=row.get("detailsUrl"), + ) + ) + return checks + + +def get_platform_adapter(name: str) -> PlatformAdapter: + normalized = name.strip().lower() + if normalized in {"github", "gh"}: + return GitHubAdapter() + raise AdapterError(f"unknown platform adapter: {name}") diff --git 
a/gr2/python_cli/syncops.py b/gr2/python_cli/syncops.py new file mode 100644 index 0000000..664d82a --- /dev/null +++ b/gr2/python_cli/syncops.py @@ -0,0 +1,835 @@ +from __future__ import annotations + +import dataclasses +import fcntl +import json +import os +from pathlib import Path +from datetime import UTC, datetime + +from gr2.prototypes import lane_workspace_prototype as lane_proto + +from .gitops import ( + clone_repo, + current_branch, + current_head_sha, + discard_if_dirty, + ensure_lane_checkout, + ensure_repo_cache, + is_git_dir, + is_git_repo, + repo_dirty, + stash_if_dirty, +) +from .hooks import load_repo_hooks +from .spec_apply import ( + ValidationIssue, + _run_materialize_hooks, + _find_repo, + _record_apply_state, + load_workspace_spec_doc, + repo_cache_path, + validate_spec, + workspace_spec_path, +) + + +SYNC_ROLLBACK_CONTRACT = ( + "sync preserves completed operations, stops on blocking failure, and reports partial state explicitly; " + "it does not attempt automatic cross-repo rollback" +) +VALID_DIRTY_MODES = {"stash", "block", "discard"} + + +@dataclasses.dataclass(frozen=True) +class SyncIssue: + level: str + code: str + scope: str + subject: str + message: str + blocks: bool + path: str | None = None + details: dict[str, object] = dataclasses.field(default_factory=dict) + + def as_dict(self) -> dict[str, object]: + return dataclasses.asdict(self) + + +@dataclasses.dataclass(frozen=True) +class SyncOperation: + kind: str + scope: str + subject: str + target_path: str + reason: str + details: dict[str, object] = dataclasses.field(default_factory=dict) + + def as_dict(self) -> dict[str, object]: + return dataclasses.asdict(self) + + +@dataclasses.dataclass(frozen=True) +class SyncPlan: + workspace_root: str + spec_path: str + status: str + dirty_mode: str + dirty_targets: list[str] + issues: list[SyncIssue] + operations: list[SyncOperation] + + def as_dict(self) -> dict[str, object]: + return { + "workspace_root": self.workspace_root, + 
"spec_path": self.spec_path, + "status": self.status, + "dirty_mode": self.dirty_mode, + "dirty_targets": list(self.dirty_targets), + "issue_count": len(self.issues), + "operation_count": len(self.operations), + "issues": [item.as_dict() for item in self.issues], + "operations": [item.as_dict() for item in self.operations], + } + + +@dataclasses.dataclass(frozen=True) +class SyncResult: + workspace_root: str + status: str + plan_status: str + dirty_mode: str + dirty_targets: list[str] + applied: list[str] + blocked: list[SyncIssue] + failures: list[SyncIssue] + rollback_contract: str + operation_id: str | None = None + + def as_dict(self) -> dict[str, object]: + return { + "workspace_root": self.workspace_root, + "status": self.status, + "plan_status": self.plan_status, + "dirty_mode": self.dirty_mode, + "dirty_targets": list(self.dirty_targets), + "applied": list(self.applied), + "blocked": [item.as_dict() for item in self.blocked], + "failures": [item.as_dict() for item in self.failures], + "rollback_contract": self.rollback_contract, + "operation_id": self.operation_id, + } + + +def _spec_issue_to_sync(issue: ValidationIssue) -> SyncIssue: + return SyncIssue( + level=issue.level, + code=issue.code, + scope="workspace_spec", + subject=issue.path or "workspace_spec", + message=issue.message, + blocks=issue.level == "error", + path=issue.path, + ) + + +def _iter_lane_docs(workspace_root: Path) -> list[tuple[str, str, dict[str, object]]]: + lanes_root = workspace_root / "agents" + docs: list[tuple[str, str, dict[str, object]]] = [] + if not lanes_root.exists(): + return docs + for owner_dir in sorted(lanes_root.iterdir()): + lane_parent = owner_dir / "lanes" + if not lane_parent.is_dir(): + continue + for lane_dir in sorted(lane_parent.iterdir()): + lane_toml = lane_dir / "lane.toml" + if not lane_toml.exists(): + continue + try: + doc = lane_proto.load_lane_doc(workspace_root, owner_dir.name, lane_dir.name) + except Exception as exc: # pragma: no cover - defensive 
against prototype parser issues + docs.append( + ( + owner_dir.name, + lane_dir.name, + { + "lane_name": lane_dir.name, + "owner_unit": owner_dir.name, + "_load_error": str(exc), + }, + ) + ) + continue + docs.append((owner_dir.name, lane_dir.name, doc)) + return docs + + +def _status_from_issues(issues: list[SyncIssue]) -> str: + if any(item.blocks for item in issues): + return "blocked" + if issues: + return "attention" + return "ready" + + +def _dirty_targets(issues: list[SyncIssue], operations: list[SyncOperation]) -> list[str]: + targets: list[str] = [] + for issue in issues: + if issue.code in {"dirty_shared_repo", "dirty_lane_repo"}: + targets.append(issue.subject) + for op in operations: + if op.kind in {"stash_dirty_repo", "discard_dirty_repo"}: + targets.append(op.subject) + return sorted(dict.fromkeys(targets)) + + +def _normalize_dirty_mode(dirty_mode: str) -> str: + normalized = dirty_mode.strip().lower() + if normalized not in VALID_DIRTY_MODES: + raise SystemExit(f"invalid --dirty value '{dirty_mode}'; expected one of: stash, block, discard") + return normalized + + +def _operation_id() -> str: + return os.urandom(8).hex() + + +def _now_utc() -> str: + return datetime.now(UTC).isoformat() + + +def _events_dir(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "events" + + +def _outbox_file(workspace_root: Path) -> Path: + return _events_dir(workspace_root) / "outbox.jsonl" + + +def _outbox_lock_file(workspace_root: Path) -> Path: + return _events_dir(workspace_root) / "outbox.lock" + + +def _sync_lock_file(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "state" / "sync.lock" + + +def _append_outbox_event(workspace_root: Path, payload: dict[str, object]) -> None: + outbox_path = _outbox_file(workspace_root) + lock_path = _outbox_lock_file(workspace_root) + outbox_path.parent.mkdir(parents=True, exist_ok=True) + lock_path.parent.mkdir(parents=True, exist_ok=True) + try: + with lock_path.open("a+", encoding="utf-8") as 
lock_fh:
            # Serialize outbox writers: exclusive advisory lock so concurrent
            # processes cannot interleave partial JSON lines or race on `seq`.
            fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX)
            seq = 1
            if outbox_path.exists():
                # Scan existing events to continue the monotonic sequence counter.
                with outbox_path.open("r", encoding="utf-8") as existing:
                    for line in existing:
                        line = line.strip()
                        if not line:
                            continue
                        try:
                            row = json.loads(line)
                        except json.JSONDecodeError:
                            # Tolerate corrupt lines: skip rather than fail emission.
                            continue
                        value = int(row.get("seq", 0))
                        if value >= seq:
                            seq = value + 1
            event = {
                "seq": seq,
                "event_id": os.urandom(8).hex(),
                "timestamp": _now_utc(),
                **payload,
            }
            with outbox_path.open("a", encoding="utf-8") as fh:
                fh.write(json.dumps(event) + "\n")
            fcntl.flock(lock_fh.fileno(), fcntl.LOCK_UN)
    except OSError:
        # Outbox writes are best-effort: event emission must never break sync.
        return


def _emit_sync_event(workspace_root: Path, payload: dict[str, object]) -> None:
    """Record a sync lifecycle event in the workspace event outbox (best-effort)."""
    _append_outbox_event(workspace_root, payload)


def build_sync_plan(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncPlan:
    """Inspect the workspace and produce a read-only SyncPlan.

    The plan lists the operations a `sync run` would perform plus any issues
    found; blocking spec-validation issues short-circuit planning entirely.
    No repo, cache, lane, or event state is mutated here.

    Raises SystemExit when the workspace spec file is missing.
    """
    workspace_root = workspace_root.resolve()
    dirty_mode = _normalize_dirty_mode(dirty_mode)
    spec_path = workspace_spec_path(workspace_root)
    if not spec_path.exists():
        # Fail fast before touching any state; point the user at workspace init.
        raise SystemExit(
            f"workspace spec not found: {spec_path}\n"
            "run `gr2 workspace init ` first or create .grip/workspace_spec.toml explicitly"
        )

    issues: list[SyncIssue] = []
    operations: list[SyncOperation] = []

    # Spec validation runs first; any blocking issue stops planning outright.
    issues.extend(_spec_issue_to_sync(issue) for issue in validate_spec(workspace_root))
    if any(item.blocks for item in issues):
        return SyncPlan(
            workspace_root=str(workspace_root),
            spec_path=str(spec_path),
            status=_status_from_issues(issues),
            dirty_mode=dirty_mode,
            dirty_targets=[],
            issues=issues,
            operations=operations,
        )

    spec = load_workspace_spec_doc(workspace_root)
    # Phase 1: shared repo caches and shared checkouts declared in the spec.
    for repo in spec.get("repos", []):
        repo_name = str(repo["name"])
        repo_root = workspace_root / str(repo["path"])
        cache_root = repo_cache_path(workspace_root, repo_name)

        if not cache_root.exists():
            operations.append(
                SyncOperation(
                    kind="seed_repo_cache",
                    scope="repo_cache",
                    subject=repo_name,
                    target_path=str(cache_root),
                    reason="shared repo cache missing",
                    details={"url": str(repo["url"])},
                )
            )
        elif not is_git_dir(cache_root):
            # Something non-git occupies the cache path; never reuse or overwrite it.
            issues.append(
                SyncIssue(
                    level="error",
                    code="cache_path_conflict",
                    scope="repo_cache",
                    subject=repo_name,
                    message=f"repo cache path exists but is not a bare git dir: {cache_root}",
                    blocks=True,
                    path=str(cache_root),
                )
            )
        else:
            operations.append(
                SyncOperation(
                    kind="refresh_repo_cache",
                    scope="repo_cache",
                    subject=repo_name,
                    target_path=str(cache_root),
                    reason="shared repo cache present; refresh remote state",
                    details={"url": str(repo["url"])},
                )
            )

        if not repo_root.exists():
            operations.append(
                SyncOperation(
                    kind="clone_shared_repo",
                    scope="shared_repo",
                    subject=repo_name,
                    target_path=str(repo_root),
                    reason="shared repo checkout missing",
                    details={"url": str(repo["url"])},
                )
            )
        elif not is_git_repo(repo_root):
            issues.append(
                SyncIssue(
                    level="error",
                    code="shared_repo_path_conflict",
                    scope="shared_repo",
                    subject=repo_name,
                    message=f"shared repo path exists but is not a git repo: {repo_root}",
                    blocks=True,
                    path=str(repo_root),
                )
            )
        else:
            if repo_dirty(repo_root):
                if dirty_mode == "block":
                    # Dirty state wins over convenience: block instead of mutating.
                    issues.append(
                        SyncIssue(
                            level="error",
                            code="dirty_shared_repo",
                            scope="shared_repo",
                            subject=repo_name,
                            message=f"shared repo has uncommitted changes and blocks sync: {repo_root}",
                            blocks=True,
                            path=str(repo_root),
                            details={"dirty_mode": dirty_mode},
                        )
                    )
                else:
                    operations.append(
                        SyncOperation(
                            kind="stash_dirty_repo" if dirty_mode == "stash" else "discard_dirty_repo",
                            scope="shared_repo",
                            subject=repo_name,
                            target_path=str(repo_root),
                            reason=f"shared repo is dirty and will be handled via --dirty={dirty_mode}",
                            details={"dirty_mode": dirty_mode},
                        )
                    )
            hooks = load_repo_hooks(repo_root)
            if hooks:
                operations.append(
                    SyncOperation(
                        kind="evaluate_repo_hooks",
                        scope="shared_repo",
                        subject=repo_name,
                        target_path=str(repo_root),
                        reason="repo hook config present; sync must account for lifecycle/policy rules",
                        details={"hook_config": str(repo_root / ".gr2" / "hooks.toml")},
                    )
                )

    # Phase 2: per-lane checkouts.
    for owner_unit, lane_name, lane_doc in _iter_lane_docs(workspace_root):
        if lane_doc.get("_load_error"):
            issues.append(
                SyncIssue(
                    level="error",
                    code="lane_doc_load_failed",
                    scope="lane",
                    subject=f"{owner_unit}/{lane_name}",
                    message=f"failed to load lane metadata: {lane_doc['_load_error']}",
                    blocks=True,
                    path=str(workspace_root / "agents" / owner_unit / "lanes" / lane_name / "lane.toml"),
                )
            )
            continue

        lane_root = lane_proto.lane_dir(workspace_root, owner_unit, lane_name)
        # Only non-stale leases count as occupancy.
        active_leases = [
            lease
            for lease in lane_proto.load_lane_leases(workspace_root, owner_unit, lane_name)
            if not lane_proto.is_stale_lease(lease)
        ]
        if active_leases:
            issues.append(
                SyncIssue(
                    level="error",
                    code="lease_blocked_sync",
                    scope="lane",
                    subject=f"{owner_unit}/{lane_name}",
                    message=f"lane has active leases that block sync mutation: {owner_unit}/{lane_name}",
                    blocks=True,
                    path=str(workspace_root / "agents" / owner_unit / "lanes" / lane_name),
                    details={
                        "leases": [
                            {"actor": lease["actor"], "mode": lease["mode"], "acquired_at": lease["acquired_at"]}
                            for lease in active_leases
                        ]
                    },
                )
            )

        for repo_name in lane_doc.get("repos", []):
            lane_repo_root = lane_root / "repos" / str(repo_name)
            expected_branch = str(dict(lane_doc.get("branch_map", {})).get(repo_name, ""))
            if not lane_repo_root.exists():
                operations.append(
                    SyncOperation(
                        kind="materialize_lane_repo",
                        scope="lane",
                        subject=f"{owner_unit}/{lane_name}:{repo_name}",
                        target_path=str(lane_repo_root),
                        reason="lane checkout missing",
                        details={"expected_branch": expected_branch},
                    )
                )
                continue
            if not is_git_repo(lane_repo_root):
                issues.append(
                    SyncIssue(
                        level="error",
                        code="lane_repo_path_conflict",
                        scope="lane",
                        subject=f"{owner_unit}/{lane_name}:{repo_name}",
                        message=f"lane repo path exists but is not a git repo: {lane_repo_root}",
                        blocks=True,
                        path=str(lane_repo_root),
                    )
                )
                continue
            if repo_dirty(lane_repo_root):
                if dirty_mode == "block":
                    issues.append(
                        SyncIssue(
                            level="error",
                            code="dirty_lane_repo",
                            scope="lane",
                            subject=f"{owner_unit}/{lane_name}:{repo_name}",
                            message=f"lane repo has uncommitted changes and blocks sync: {lane_repo_root}",
                            blocks=True,
                            path=str(lane_repo_root),
                            details={"expected_branch": expected_branch, "dirty_mode": dirty_mode},
                        )
                    )
                else:
                    operations.append(
                        SyncOperation(
                            kind="stash_dirty_repo" if dirty_mode == "stash" else "discard_dirty_repo",
                            scope="lane",
                            subject=f"{owner_unit}/{lane_name}:{repo_name}",
                            target_path=str(lane_repo_root),
                            reason=f"lane repo is dirty and will be handled via --dirty={dirty_mode}",
                            details={"expected_branch": expected_branch, "dirty_mode": dirty_mode},
                        )
                    )
            # Status/plan mode never moves branches: only record an inspection op.
            operations.append(
                SyncOperation(
                    kind="inspect_lane_repo_branch",
                    scope="lane",
                    subject=f"{owner_unit}/{lane_name}:{repo_name}",
                    target_path=str(lane_repo_root),
                    reason="lane checkout present; verify branch alignment before any sync run",
                    details={"expected_branch": expected_branch},
                )
            )

    return SyncPlan(
        workspace_root=str(workspace_root),
        spec_path=str(spec_path),
        status=_status_from_issues(issues),
        dirty_mode=dirty_mode,
        dirty_targets=_dirty_targets(issues, operations),
        issues=issues,
        operations=operations,
    )


def render_sync_plan(plan: SyncPlan) -> str:
    """Render a SyncPlan as human-readable multi-line text."""
    lines = [
        "SyncPlan",
        f"workspace_root = {plan.workspace_root}",
        f"status = {plan.status}",
        f"dirty_mode = {plan.dirty_mode}",
        f"issue_count = {len(plan.issues)}",
        f"operation_count = {len(plan.operations)}",
    ]
    if plan.dirty_targets:
        lines.append("DIRTY_TARGETS")
        lines.extend(f"- {item}" for item in plan.dirty_targets)
    if plan.issues:
        lines.append("ISSUES")
        for issue in
plan.issues:
            subject = f" [{issue.subject}]" if issue.subject else ""
            lines.append(f"- {issue.level}:{issue.code}{subject} {issue.message}")
    if plan.operations:
        lines.append("OPERATIONS")
        for op in plan.operations:
            lines.append(f"- {op.kind} [{op.scope}] {op.subject} -> {op.target_path} ({op.reason})")
    return "\n".join(lines)


def sync_status_payload(workspace_root: Path) -> dict[str, object]:
    """Return the sync plan for *workspace_root* as a plain dict payload."""
    return build_sync_plan(workspace_root).as_dict()


def sync_status_json(workspace_root: Path) -> str:
    """Return the sync plan for *workspace_root* as pretty-printed JSON."""
    return json.dumps(sync_status_payload(workspace_root), indent=2)


def _issue_from_exception(op: SyncOperation, exc: BaseException) -> SyncIssue:
    """Convert an operation-execution failure into a blocking, op-scoped SyncIssue."""
    message = str(exc).strip() or f"sync operation failed: {op.kind}"
    return SyncIssue(
        level="error",
        code=f"{op.kind}_failed",
        scope=op.scope,
        subject=op.subject,
        message=message,
        blocks=True,
        path=op.target_path,
        details={"operation": op.kind},
    )


def _execute_operation(workspace_root: Path, spec: dict[str, object], op: SyncOperation) -> str:
    """Apply one planned SyncOperation and return a human-readable summary.

    Raises SystemExit for branch mismatches and unsupported operation kinds;
    exceptions from the underlying git helpers propagate to the caller.
    """
    target_path = Path(op.target_path)
    # Capture pre-mutation HEAD so repo_updated events can report old/new SHAs.
    before_sha = current_head_sha(target_path) if op.scope in {"shared_repo", "lane"} and target_path.exists() else None
    if op.kind in {"seed_repo_cache", "refresh_repo_cache"}:
        repo_spec = _find_repo(spec, op.subject)
        cache_path = repo_cache_path(workspace_root, str(repo_spec["name"]))
        created = ensure_repo_cache(str(repo_spec["url"]), cache_path)
        if op.kind == "seed_repo_cache":
            return f"seeded repo cache for '{op.subject}' at {cache_path}"
        if created:
            # A refresh that had to create the cache is reported as a seed.
            return f"seeded repo cache for '{op.subject}' at {cache_path}"
        return f"refreshed repo cache for '{op.subject}' at {cache_path}"

    if op.kind == "clone_shared_repo":
        repo_spec = _find_repo(spec, op.subject)
        repo_root = workspace_root / str(repo_spec["path"])
        cache_path = repo_cache_path(workspace_root, str(repo_spec["name"]))
        # Clone referencing the shared cache, then run materialize hooks.
        first_materialize = clone_repo(str(repo_spec["url"]), repo_root, reference_repo_root=cache_path)
        _run_materialize_hooks(workspace_root, repo_root, str(repo_spec["name"]), first_materialize, manual_hooks=False)
        after_sha = current_head_sha(repo_root)
        _emit_sync_event(
            workspace_root,
            {
                "type": "sync.repo_updated",
                "repo": op.subject,
                "scope": "shared_repo",
                "old_sha": before_sha,
                "new_sha": after_sha,
            },
        )
        return f"cloned shared repo '{op.subject}' into {repo_root}"

    if op.kind == "evaluate_repo_hooks":
        repo_root = Path(op.target_path)
        hooks = load_repo_hooks(repo_root)
        if hooks:
            return f"validated repo hooks for '{op.subject}'"
        return f"no repo hooks for '{op.subject}'"

    if op.kind == "materialize_lane_repo":
        # Lane subjects are formatted "<owner_unit>/<lane_name>:<repo_name>".
        owner_and_lane, repo_name = op.subject.split(":", 1)
        owner_unit, lane_name = owner_and_lane.split("/", 1)
        repo_spec = _find_repo(spec, repo_name)
        source_repo_root = workspace_root / str(repo_spec["path"])
        target_repo_root = Path(op.target_path)
        expected_branch = str(op.details.get("expected_branch", ""))
        first_materialize = ensure_lane_checkout(
            source_repo_root=source_repo_root,
            target_repo_root=target_repo_root,
            branch=expected_branch,
        )
        _run_materialize_hooks(workspace_root, target_repo_root, repo_name, first_materialize, manual_hooks=False)
        after_sha = current_head_sha(target_repo_root)
        _emit_sync_event(
            workspace_root,
            {
                "type": "sync.repo_updated",
                "repo": repo_name,
                "scope": "lane",
                "owner_unit": owner_unit,
                "lane": lane_name,
                "old_sha": before_sha,
                "new_sha": after_sha,
                "branch": expected_branch,
            },
        )
        return f"materialized lane repo '{op.subject}' at {target_repo_root}"

    if op.kind == "inspect_lane_repo_branch":
        expected_branch = str(op.details.get("expected_branch", "")).strip()
        repo_root = Path(op.target_path)
        actual_branch = current_branch(repo_root)
        if expected_branch and actual_branch != expected_branch:
            # Branch alignment must be explicit; never auto-checkout here.
            raise SystemExit(
                f"lane repo branch mismatch for {op.subject}: expected {expected_branch}, found {actual_branch}"
            )
        return f"verified lane branch for '{op.subject}' ({actual_branch or '-'})"

    if op.kind == "stash_dirty_repo":
        repo_root = Path(op.target_path)
        if stash_if_dirty(repo_root, f"gr2 sync auto-stash: {op.subject}"):
            _emit_sync_event(
                workspace_root,
                {
                    "type": "sync.repo_skipped",
                    "repo": op.subject.split(":")[-1],
                    "scope": op.scope,
                    "reason": "dirty_stashed",
                },
            )
            return f"stashed dirty repo state for '{op.subject}'"
        return f"repo already clean for '{op.subject}'"

    if op.kind == "discard_dirty_repo":
        repo_root = Path(op.target_path)
        if discard_if_dirty(repo_root):
            _emit_sync_event(
                workspace_root,
                {
                    "type": "sync.repo_skipped",
                    "repo": op.subject.split(":")[-1],
                    "scope": op.scope,
                    "reason": "dirty_discarded",
                },
            )
            return f"discarded dirty repo state for '{op.subject}'"
        return f"repo already clean for '{op.subject}'"

    raise SystemExit(f"unsupported sync operation kind: {op.kind}")


def _acquire_sync_lock(workspace_root: Path):
    """Try to take the workspace-wide sync lock; return the open handle or None."""
    lock_path = _sync_lock_file(workspace_root)
    lock_path.parent.mkdir(parents=True, exist_ok=True)
    lock_fh = lock_path.open("a+", encoding="utf-8")
    try:
        # Non-blocking acquire: a held lock means another sync run is in flight.
        fcntl.flock(lock_fh.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
    except BlockingIOError:
        lock_fh.close()
        return None
    return lock_fh


def _release_sync_lock(lock_fh) -> None:
    """Unlock and close a handle from _acquire_sync_lock (None is a no-op)."""
    if lock_fh is None:
        return
    fcntl.flock(lock_fh.fileno(), fcntl.LOCK_UN)
    lock_fh.close()


def run_sync(workspace_root: Path, *, dirty_mode: str = "stash") -> SyncResult:
    """Plan and execute a workspace sync, returning a SyncResult.

    Refuses to run when another sync holds the workspace lock or when the
    plan carries blocking issues; otherwise executes planned operations in
    order, stopping at the first failure (completed work is not rolled back).
    """
    workspace_root = workspace_root.resolve()
    dirty_mode = _normalize_dirty_mode(dirty_mode)
    operation_id = _operation_id()
    lock_fh = _acquire_sync_lock(workspace_root)
    if lock_fh is None:
        blocked_issue = SyncIssue(
            level="error",
            code="sync_lock_held",
            scope="workspace",
            subject=str(workspace_root),
            message="another sync run currently holds the workspace lock",
            blocks=True,
            path=str(_sync_lock_file(workspace_root)),
            details={"operation_id":
operation_id},
        )
        _emit_sync_event(
            workspace_root,
            {
                "type": "sync.conflict",
                "operation_id": operation_id,
                "reason": "lock_held",
                "workspace_root": str(workspace_root),
            },
        )
        return SyncResult(
            workspace_root=str(workspace_root),
            status="blocked",
            plan_status="blocked",
            dirty_mode=dirty_mode,
            dirty_targets=[],
            applied=[],
            blocked=[blocked_issue],
            failures=[],
            rollback_contract=SYNC_ROLLBACK_CONTRACT,
            operation_id=operation_id,
        )

    _emit_sync_event(
        workspace_root,
        {
            "type": "sync.started",
            "operation_id": operation_id,
            "workspace_root": str(workspace_root),
            "dirty_mode": dirty_mode,
        },
    )
    plan = build_sync_plan(workspace_root, dirty_mode=dirty_mode)
    blocked = [issue for issue in plan.issues if issue.blocks]
    if blocked:
        for issue in blocked:
            if issue.code == "lease_blocked_sync":
                # Lease conflicts get a dedicated conflict event before completion.
                _emit_sync_event(
                    workspace_root,
                    {
                        "type": "sync.conflict",
                        "operation_id": operation_id,
                        "workspace_root": str(workspace_root),
                        "reason": "active_lease",
                        "subject": issue.subject,
                        "leases": issue.details.get("leases", []),
                    },
                )
        _emit_sync_event(
            workspace_root,
            {
                "type": "sync.completed",
                "operation_id": operation_id,
                "workspace_root": str(workspace_root),
                "status": "blocked",
                "blocked_codes": [item.code for item in blocked],
            },
        )
        _release_sync_lock(lock_fh)
        return SyncResult(
            workspace_root=str(workspace_root),
            status="blocked",
            plan_status=plan.status,
            dirty_mode=dirty_mode,
            dirty_targets=list(plan.dirty_targets),
            applied=[],
            blocked=blocked,
            failures=[],
            rollback_contract=SYNC_ROLLBACK_CONTRACT,
            operation_id=operation_id,
        )

    spec = load_workspace_spec_doc(workspace_root)
    applied: list[str] = []
    failures: list[SyncIssue] = []
    try:
        for op in plan.operations:
            try:
                applied.append(_execute_operation(workspace_root, spec, op))
            except Exception as exc:
                # Stop at the first failure; earlier completed work stays applied.
                # NOTE(review): _execute_operation raises SystemExit for branch
                # mismatch / unsupported kinds — that is a BaseException and
                # escapes this handler; confirm whether that is intended.
                failures.append(_issue_from_exception(op, exc))
                break

        if applied:
            _record_apply_state(workspace_root, applied)

        # Partial progress plus a failure is reported distinctly from all-fail,
        # so sync never reports all-green on a partial workspace failure.
        status = "success"
        if failures and applied:
            status = "partial_failure"
        elif failures:
            status = "failed"

        _emit_sync_event(
            workspace_root,
            {
                "type": "sync.completed",
                "operation_id": operation_id,
                "workspace_root": str(workspace_root),
                "status": status,
                "applied_count": len(applied),
                "failure_codes": [item.code for item in failures],
            },
        )

        return SyncResult(
            workspace_root=str(workspace_root),
            status=status,
            plan_status=plan.status,
            dirty_mode=dirty_mode,
            dirty_targets=list(plan.dirty_targets),
            applied=applied,
            blocked=[],
            failures=failures,
            rollback_contract=SYNC_ROLLBACK_CONTRACT,
            operation_id=operation_id,
        )
    finally:
        # The lock is released on every exit path, including raised exceptions.
        _release_sync_lock(lock_fh)


def render_sync_result(result: SyncResult) -> str:
    """Render a SyncResult as human-readable multi-line text."""
    lines = [
        "SyncResult",
        f"workspace_root = {result.workspace_root}",
        f"status = {result.status}",
        f"plan_status = {result.plan_status}",
        f"dirty_mode = {result.dirty_mode}",
        f"operation_id = {result.operation_id or '-'}",
        f"applied_count = {len(result.applied)}",
        f"failure_count = {len(result.failures)}",
    ]
    if result.dirty_targets:
        lines.append("DIRTY_TARGETS")
        lines.extend(f"- {item}" for item in result.dirty_targets)
    if result.applied:
        lines.append("APPLIED")
        lines.extend(f"- {item}" for item in result.applied)
    if result.blocked:
        lines.append("BLOCKED")
        lines.extend(f"- {item.code}: {item.message}" for item in result.blocked)
    if result.failures:
        lines.append("FAILURES")
        lines.extend(f"- {item.code}: {item.message}" for item in result.failures)
    lines.append(f"rollback_contract = {result.rollback_contract}")
    return "\n".join(lines)