diff --git a/.gr2/hooks.toml b/.gr2/hooks.toml new file mode 100644 index 0000000..3576411 --- /dev/null +++ b/.gr2/hooks.toml @@ -0,0 +1,24 @@ +[repo] +name = "grip" + +[[files.link]] +src = "{workspace_root}/config/claude.md" +dest = "{repo_root}/CLAUDE.md" +if_exists = "overwrite" + +[[lifecycle.on_materialize]] +name = "cargo-build" +command = "cargo build" +cwd = "{repo_root}" +when = "first_materialize" +on_failure = "block" + +[[lifecycle.on_enter]] +name = "cargo-check" +command = "cargo check" +cwd = "{repo_root}" +when = "always" +on_failure = "warn" + +[policy] +required_reviewers = 1 diff --git a/gr2/docs/AUDIT-GR-VS-GR2.md b/gr2/docs/AUDIT-GR-VS-GR2.md new file mode 100644 index 0000000..8cdac82 --- /dev/null +++ b/gr2/docs/AUDIT-GR-VS-GR2.md @@ -0,0 +1,177 @@ +# Audit: `gr` vs `gr2` + +This document compares the current shipped `gr` surface against the current +`gr2` surfaces. + +The goal is not to declare parity prematurely. The goal is to identify: + +- what `gr` already does in production +- what `gr2` already proves +- what remains missing before `gr2` can replace `gr` +- which runtime is authoritative during the transition + +## 1. Current Roles + +### `gr` + +`gr` is the current production multi-repo tool. + +It is broad, mature, and still required for daily workflow coverage across: + +- repo bootstrap and migration +- cross-repo git workflow +- PR and issue workflow +- agent orchestration +- channel integration +- release and CI operations + +### `gr2` + +`gr2` has the better long-term architecture, but today it is split across two +surfaces: + +- Rust `gr2` + - registry/spec/plan/apply backbone +- Python `gr2` + - active UX proving layer for lanes, hooks, workspace bootstrap, and real git behavior + +## 2. Transition Decision + +This is the operating rule for the project: + +- Python `gr2` is the active UX authority now. +- Rust `gr2` is the future backend/runtime replacement. 
+ +That means: + +- new user-facing command design should happen in Python first +- Python command names, config shapes, and event schemas are the contract +- Rust should reimplement the proven contract later, not invent a second UX + +## 3. Command Matrix + +Status meanings: + +- `Production` — usable in `gr` today +- `Shipped (Python)` — available in Python `gr2` +- `Shipped (Rust)` — available in Rust `gr2` +- `Partial` — exists but does not yet cover the full workflow +- `Missing` — no real replacement yet + +| Workflow | `gr` | Python `gr2` | Rust `gr2` | Audit | +|---|---|---|---|---| +| Workspace bootstrap from existing repos | `gr init --from-dirs`, `gr migrate ...` | `workspace init` | `init` only creates empty workspace | `gr2` partial | +| Workspace materialization from spec | `gr sync` / manifest flow | `workspace materialize` | `apply` | `gr2` partial, split across runtimes | +| Show repo maintenance state | `status` | `repo status` | `repo status` | `gr2` shipped | +| Inspect repo hook config | no first-class equivalent | `repo hooks` | missing | `gr2` Python-only | +| Create task context / lane | no first-class lane model | `lane create` | `lane create` | `gr2` shipped | +| Enter active task context | no first-class lane enter | `lane enter` | missing | Python-only | +| Exit active task context | no first-class lane exit | `lane exit` | missing | Python-only | +| Recover current context | ad hoc (`gr status`, branch state) | `lane current` | missing | Python-only | +| Lease/occupancy control | no first-class equivalent | `lane lease acquire/release/show` | missing | Python-only | +| Lane-aware execution planning | indirect | missing in CLI, prototypes exist | `exec status` | split/incomplete | +| Lane-aware execution run | indirect | missing | missing | missing | +| Review requirement check | indirect/manual | `review requirements` | missing | Python-only | +| Declarative spec show/validate | limited via manifest | missing | `spec show`, `spec 
validate` | Rust-only | +| Plan workspace drift | implicit in sync/status | missing | `plan` | Rust-only | +| Apply workspace drift | `sync` | missing as explicit command | `apply` | Rust-only | +| Real git lane checkout creation | no lane model | shipped | missing | Python-only | +| Hook-driven file projections | `link`/manifest model | shipped | missing | Python-only | +| Hook-driven lifecycle (`on_materialize`, `on_enter`, `on_exit`) | ad hoc scripts | shipped | missing | Python-only | +| Branch create/switch across repos | `branch`, `checkout` | partial via lane branch intent + git checkout in lane flow | missing | `gr` still primary | +| Stage / restore / diff / commit / push | shipped | missing | missing | `gr` only | +| PR create / merge / review / checks | shipped | missing | missing | `gr` only | +| Issue workflow | shipped | missing | missing | `gr` only | +| Group / target / cache / gc | shipped | missing | partial (`repo status` sees cache-style model only) | `gr` only | +| Tree / griptree workflow | shipped | missing | missing | `gr` only | +| Spawn / dashboard / channel | shipped | missing from CLI, only prototype seam docs | missing | `gr` only | +| Release / CI / bench / verify | shipped | missing | missing | `gr` only | + +## 4. What `gr2` Already Proves + +Even though `gr2` is not feature-complete, it already proves the more coherent +workspace model: + +- lanes are the primary working surface +- leases make occupancy explicit +- review requirements can be enforced from compiled constraints +- hooks travel with repos via `.gr2/hooks.toml` +- workspace bootstrap and materialization can be expressed as a clean spec flow +- real git worktrees/checkouts can be created and managed from the lane model + +This is the architectural advantage over `gr`. + +## 5. 
What Still Keeps `gr` Necessary + +Today the team still needs `gr` for normal production work because `gr2` does +not yet cover: + +- daily cross-repo branch / commit / push flows +- PR creation / merge / review workflow +- issue workflow +- tree/griptree lifecycle +- agent spawn / mission control / channel operations +- release/CI surfaces + +The practical result is: + +- `gr` remains the production multitool +- `gr2` remains the proving path for the replacement model + +## 6. Biggest Current Problem + +The biggest current `gr2` problem is not missing ideas. It is split authority. + +Right now: + +- Rust `gr2` owns spec/plan/apply/registry concepts +- Python `gr2` owns the best real UX and real hook/git behavior + +That is acceptable during transition, but not as a steady state. + +## 7. Recommended Structure + +### Near-term + +- Python `gr2` defines the user-facing interface +- Rust `gr2` should not grow competing UX nouns +- use Python to prove: + - command names + - hook schema + - lane semantics + - workspace bootstrap/materialization behavior + +### Mid-term + +- port proven Python commands behind the same interface into Rust +- keep the Python CLI as the compatibility oracle during the port + +### Long-term + +- Rust becomes the backend/runtime implementation +- Python stops being the primary runtime, but not before command parity exists + +## 8. Replacement Rule + +`gr2` does not replace `gr` when it matches one subsystem. + +`gr2` replaces `gr` only when one coherent `gr2` surface can cover: + +- workspace init/materialize +- repo status/hooks +- lane create/enter/exit/current/lease +- spec/plan/apply +- review requirements +- the minimum daily repo workflow needed by the team + +Until then, we should be explicit: + +- use `gr` for broad production workflow +- use Python `gr2` to prove the replacement model + +## 9. Immediate Next Steps + +1. Keep Python `gr2` as the UX authority. +2. Add missing daily-workflow surfaces in Python before porting them. +3. 
That makes the workspace manifest too heavy and too central. It also means repo
Core Model + +### 2.1 Bare WorkspaceSpec + +The workspace file only declares: + +- workspace identity +- repos +- units +- optional workspace-wide constraints compiled from premium + +Example: + +```toml +workspace_name = "synapt-codex" + +[[repos]] +name = "grip" +path = "repos/grip" +url = "git@github.com:synapt-dev/grip.git" + +[[repos]] +name = "synapt" +path = "repos/synapt" +url = "git@github.com:synapt-dev/synapt.git" + +[[repos]] +name = "synapt-private" +path = "repos/synapt-private" +url = "git@github.com:synapt-dev/synapt-private.git" + +[[units]] +name = "atlas" +path = "agents/atlas" +agent_id = "atlas-agent" +repos = ["grip", "synapt", "synapt-private"] + +[[units]] +name = "apollo" +path = "agents/apollo" +agent_id = "apollo-agent" +repos = ["grip", "synapt", "synapt-private"] + +[workspace_constraints] +max_concurrent_edit_leases_global = 2 + +[workspace_constraints.required_reviewers] +grip = 1 +synapt = 1 +synapt-private = 2 +``` + +Rules: + +- no linkfile/copyfile definitions here +- no repo-local lifecycle commands here +- no org logic here +- compiled workspace-wide constraints are allowed here + +### 2.2 Repo-Local Hook File + +Each repo may provide: + +- `.gr2/hooks.toml` + +This file defines repo-local: + +- file projections +- lifecycle hooks +- repo policies +- optional tool/runtime defaults + +If the file is absent, the repo is treated as having no special behavior. + +## 3. Hook Schema + +The starting schema should be small and explicit. + +```toml +version = 1 + +[repo] +name = "synapt" + +[[files.link]] +src = "CLAUDE.md" +dest = "{workspace_root}/CLAUDE.md" +if_exists = "error" + +[[files.copy]] +src = ".env.example" +dest = "{unit_root}/repos/synapt/.env.example" +if_exists = "error" + +[[lifecycle.on_materialize]] +name = "editable-install" +command = "uv pip install -e ." 
+cwd = "{repo_root}" +when = "first_materialize" +on_failure = "block" + +[[lifecycle.on_enter]] +name = "show-dev-hints" +command = "python scripts/dev_hints.py" +cwd = "{repo_root}" +when = "always" +on_failure = "warn" + +[[lifecycle.on_exit]] +name = "cleanup-temp-state" +command = "python scripts/cleanup.py" +cwd = "{repo_root}" +when = "dirty" +on_failure = "warn" + +[policy] +required_reviewers = 1 +allow_lane_kinds = ["feature", "review"] +preferred_exec = ["pytest -q"] +``` + +### 3.1 Supported Sections + +Initial sections: + +- `[repo]` +- `[[files.link]]` +- `[[files.copy]]` +- `[[lifecycle.on_materialize]]` +- `[[lifecycle.on_enter]]` +- `[[lifecycle.on_exit]]` +- `[policy]` + +Possible later sections: + +- `[exec]` +- `[tooling]` +- `[[lifecycle.on_review_start]]` +- `[[lifecycle.on_review_complete]]` + +### 3.2 File Projection Conflict Policy + +Each file projection must define how conflicts are handled with: + +- `if_exists = "skip" | "overwrite" | "merge" | "error"` + +Default: + +- `error` + +Meaning: + +- `skip` + - leave the existing destination untouched +- `overwrite` + - replace the destination with the source +- `merge` + - delegate to a merge-capable projection handler for supported file types +- `error` + - fail materialization instead of silently colliding + +This matters immediately because multiple repos may want to project to the +same workspace path, for example `CLAUDE.md`. 
+ +### 3.3 Lifecycle `when` Semantics + +The initial `when` values are: + +- `first_materialize` + - run only when the repo is being materialized into this workspace target for + the first time +- `always` + - run every time the lifecycle stage is reached +- `dirty` + - run only when the repo has uncommitted local state, including tracked + modifications, staged changes, or untracked files +- `manual` + - never run automatically; only run when the user explicitly requests the + hook or hook group + +### 3.4 Hook Failure Policy + +Each lifecycle hook may define: + +- `on_failure = "block" | "warn" | "skip"` + +Default behavior: + +- `on_materialize` + - `block` +- `on_enter` + - `warn` +- `on_exit` + - `warn` +- file projections + - `block` + +Meaning: + +- `block` + - stop the current operation with a failure +- `warn` + - record the failure and continue +- `skip` + - do not treat failure as an error and continue silently except for logging + +These defaults are deliberate: + +- broken repo setup during materialization should stop early +- broken enter/exit hooks should not trap users outside their lane +- broken file projections should not fail silently + +### 3.5 Template Variables + +Allowed interpolation variables: + +- `{workspace_root}` +- `{unit_root}` +- `{lane_root}` +- `{repo_root}` +- `{repo_name}` +- `{lane_owner}` +- `{lane_subject}` +- `{lane_name}` + +Rules: + +- interpolation is explicit, not shell-magic +- undefined variables are validation errors +- paths resolve before command execution + +## 4. Materialization Flow + +`gr2 apply` should use the following flow: + +1. read `WorkspaceSpec` +2. materialize shared cache / repo source if configured +3. materialize unit-local or lane-local working checkouts +4. for each materialized repo root, read `.gr2/hooks.toml` if present +5. apply file projections +6. run `on_materialize` hooks +7. write local state/logs + +`gr2 lane enter` should: + +1. resolve current lane root +2. 
for each repo in scope, read `.gr2/hooks.toml` +3. run `on_enter` hooks +4. emit lane-enter event + +`gr2 lane exit` should: + +1. run `on_exit` hooks for repos in scope +2. emit lane-exit event + +Important rule: + +- hook config is consumed by the workspace orchestrator +- repo code never has to know about units, lanes, or org logic beyond the + interpolated local paths and lane names it is given + +## 5. Example Repo Hooks + +These are grounded in the actual repos we have. + +### 5.1 `grip` + +`grip` is the workspace router. Its repo-local hooks should stay light. + +Example `.gr2/hooks.toml`: + +```toml +version = 1 + +[repo] +name = "grip" + +[policy] +required_reviewers = 1 +allow_lane_kinds = ["feature", "review", "scratch"] +preferred_exec = ["cargo test --quiet"] + +[[lifecycle.on_materialize]] +name = "cargo-check" +command = "cargo check -q" +cwd = "{repo_root}" +when = "manual" +on_failure = "warn" +``` + +Why: + +- `grip` should not auto-run expensive hooks on every enter +- repo policy can still declare reviewer count and preferred test surface + +### 5.2 `synapt` + +`synapt` is Python and often needs an editable install in the active +workspace. + +Example: + +```toml +version = 1 + +[repo] +name = "synapt" + +[policy] +required_reviewers = 1 +allow_lane_kinds = ["feature", "review", "scratch"] +preferred_exec = ["pytest tests/ -q"] + +[[lifecycle.on_materialize]] +name = "editable-install" +command = "uv pip install -e ." +cwd = "{repo_root}" +when = "first_materialize" +on_failure = "block" + +[[lifecycle.on_enter]] +name = "workspace-doctor" +command = "python -m synapt.doctor --workspace {workspace_root}" +cwd = "{repo_root}" +when = "manual" +``` + +Why: + +- this repo frequently suffers from stale editable install drift +- making that behavior repo-local is better than hiding it in a workspace-level + manifest + +### 5.3 `synapt-private` + +`synapt-private` already carries private config and has stronger review needs. 
+ +Example: + +```toml +version = 1 + +[repo] +name = "synapt-private" + +[policy] +required_reviewers = 2 +allow_lane_kinds = ["feature", "review"] +preferred_exec = ["pytest tests/ -q"] + +[[files.link]] +src = "config/models.json" +dest = "{lane_root}/repos/synapt-private/.gr2-linked/models.json" +if_exists = "error" + +[[lifecycle.on_materialize]] +name = "editable-install" +command = "uv pip install -e ." +cwd = "{repo_root}" +when = "first_materialize" +on_failure = "block" + +[[lifecycle.on_enter]] +name = "validate-private-config" +command = "python scripts/validate_config.py" +cwd = "{repo_root}" +when = "manual" +``` + +Why: + +- stronger reviewer requirements belong in repo policy +- private config validation should travel with the repo +- the workspace should not need to know what model files matter here + +## 6. What Materialization Actually Does + +For a lane touching `grip`, `synapt`, and `synapt-private`, `gr2` should: + +1. create the lane root +2. materialize the lane-local or unit-local checkouts +3. load `.gr2/hooks.toml` from each checkout in `[[repos]]` declaration order +4. apply file actions declared by each repo +5. run `on_materialize` for first-time repo setup +6. record what ran in `.grip/state/` + +That means: + +- the workspace orchestrator remains generic +- repo-specific behavior travels with the repo +- the repo author owns the repo’s materialization contract + +## 7. Workspace vs Repo Responsibility + +### WorkspaceSpec owns + +- repo list +- unit list +- lane ownership model +- workspace-wide constraints +- materialization topology + +### Repo hook config owns + +- repo-local file projections +- repo-local lifecycle behavior +- repo-local policy defaults +- repo-local preferred commands + +### Premium owns + +- durable identity +- org roles +- entitlements +- compilation of org/policy into workspace constraints + +## 8. Migration Path + +We need one migration story, not three separate products. 
+### 8.4 `agents.toml` Relationship
Should file actions run against unit-local roots, lane-local roots, or both? +2. Which lifecycle hooks are safe by default versus manual-only? +3. How do hook failures interact with `apply`? + - block + - warn + - retry +4. Do we want per-repo review requirements only, or repo+path granularity later? +5. How do repo hooks compose with shared cache-backed materialization? + +## 11. Recommended Next Step + +Prototype this model in Python before touching Rust: + +1. add `.gr2/hooks.toml` parser +2. add a minimal Python `gr2` CLI surface +3. implement: + - file projection + - `on_materialize` + - `on_enter` + - `on_exit` +4. validate with: + - `grip` + - `synapt` + - `synapt-private` + +The success condition is simple: + +- the workspace spec gets smaller +- repo behavior becomes more portable +- the team can use Python `gr2` daily without needing a second UX migration diff --git a/gr2/docs/SYNAPT-INTEGRATION.md b/gr2/docs/SYNAPT-INTEGRATION.md new file mode 100644 index 0000000..f7e71a4 --- /dev/null +++ b/gr2/docs/SYNAPT-INTEGRATION.md @@ -0,0 +1,458 @@ +# Synapt Integration + +This document defines the integration surface between: + +- Premium + - durable identity + - org policy + - entitlements + - control-plane compilation +- OSS gr2 + - local workspace materialization + - unit and lane enforcement + - lane events + - execution surfaces +- OSS recall + - indexing and querying neutral lane event history + +The key rule is simple: + +- Premium compiles. +- OSS consumes. + +`gr2` should feel native inside a Synapt workspace, but it must not absorb +Premium-only identity or org logic. + +## 1. Architecture Overview + +The data flow is: + +```text +Premium org config + identity + policy + entitlements + -> compiler + -> WorkspaceSpec + lane/unit constraints + -> gr2 workspace materialization + lane enforcement + -> lane event log + -> recall indexing + channel bridge +``` + +Operationally: + +1. Premium resolves durable agent identity and workspace assignment. +2. 
Premium compiles org rules into workspace-scoped constraints. +3. `gr2` materializes the workspace and enforces unit/lane behavior locally. +4. `gr2` emits neutral lane events. +5. Recall indexes those events into searchable lane history. +6. The channel bridge derives `#dev`-style notifications from the same event log. + +The important layering rule: + +- Premium is the source of truth for identity, org policy, and entitlement + evaluation. +- `gr2` is the source of truth for local workspace state and lane execution. +- Recall is the source of truth for searchable event history derived from local + workspace events. + +## 2. Identity Binding Contract + +### What Premium Provides + +Premium resolves: + +- `handle` +- `persistent_id` +- workspace membership +- workspace assignment +- `owner_unit` +- role +- repo scope +- lane limits + +This binding is workspace-scoped. The same persistent agent may map to +different `owner_unit` names in different workspaces. + +Example: + +- `opus` with `persistent_id = agent_opus_abc123` + - `owner_unit = synapt-core` in workspace `ws_synapt_core` + - `owner_unit = editorial-opus` in workspace `ws_blog` + +Reassignment is also Premium-owned: + +- Premium may change `opus` from `synapt-core` to `release-control` +- `gr2` does not infer or own that change +- `gr2` simply consumes the recompiled workspace view + +### What gr2 Consumes + +`gr2` consumes a workspace-scoped unit record: + +```json +{ + "name": "release-control", + "path": "agents/release-control", + "agent_id": "agent_opus_abc123", + "repos": ["grip", "premium", "recall"], + "constraints": { + "lane_limit": 2 + } +} +``` + +Rules: + +- `agent_id` is opaque attribution data in OSS +- `owner_unit` is the local workspace identity +- `gr2` must not perform org resolution from `agent_id` +- `gr2` must not infer cross-workspace identity mapping + +Living prototype: + +- 
"lane_limit": 2,
Role-based repo access + - builders get all repos + - QA gets test-focused access only +3. Repo update mid-sprint + - new repo added + - recompile updates affected units +4. Entitlement downgrade + - premium removed + - unit degrades gracefully to OSS defaults + +Important downgrade rule: + +- Premium decides the degradation policy +- `gr2` only enforces the compiled downgraded result + +Living prototype: + +- [org_policy_compiler.py](/Users/layne/Development/synapt-codex/atlas-gr2-identity-org/gr2/prototypes/org_policy_compiler.py) + +## 4. Lane Event -> Recall Pipeline + +`gr2` emits neutral lane events into: + +- `.grip/events/lane_events.jsonl` + +Example event: + +```json +{ + "type": "lane_enter", + "agent": "agent:atlas", + "agent_id": "agent_atlas_ghi789", + "owner_unit": "design-research", + "lane": "auth-refactor", + "lane_type": "feature", + "repos": ["grip", "premium"], + "timestamp": "2026-04-12T14:06:45+00:00", + "event_id": "47db552da9a1535c" +} +``` + +Recall consumes these events without importing Premium semantics. + +### Indexing Surface + +The recall prototype indexes the event log: + +- by lane +- by actor +- by repo +- by time range + +### Query Interface + +Examples: + +- `lane_history("auth-refactor")` +- `actor_history("agent:atlas")` +- `repo_activity("grip")` +- `time_range(start, end)` + +These support queries like: + +- “what happened on the auth-refactor lane last week?” +- “what lanes did atlas touch?” +- “who last worked in grip?” + +Living prototype: + +- [recall_lane_history.py](/Users/layne/Development/synapt-codex/atlas-gr2-identity-org/gr2/prototypes/recall_lane_history.py) + +## 5. Channel Bridge + +Lane events also feed a channel bridge. + +### Recommended Model + +The recommended model is watcher-first, not synchronous posting. 
+ +Watcher flow: + +```text +lane_events.jsonl + -> watcher cursor + -> channel outbox + -> channel transport +``` + +Why watcher-first: + +- lane transitions remain durable even if channel delivery is down +- replay is resumable from the append-only event log +- dedupe is explicit through `event_id` +- channel posting does not block lane transitions + +### Outbox Format + +The bridge produces channel-compatible rows such as: + +```json +{ + "type": "channel_post", + "channel": "#dev", + "delivery": "watcher", + "source_event_id": "47db552da9a1535c", + "source_event_type": "lane_enter", + "agent": "agent:atlas", + "agent_id": "agent_atlas_ghi789", + "owner_unit": "design-research", + "lane": "auth-refactor", + "lane_type": "feature", + "repos": ["grip", "premium"], + "message": "agent:atlas entered design-research/auth-refactor [feature] repos=grip,premium", + "timestamp": "2026-04-12T14:06:47+00:00" +} +``` + +The watcher keeps cursor state in: + +- `.grip/events/channel_bridge.cursor.json` + +and writes outbox rows to: + +- `.grip/events/channel_outbox.jsonl` + +Living prototype: + +- [channel_lane_bridge.py](/Users/layne/Development/synapt-codex/atlas-gr2-playground-stack/gr2/prototypes/channel_lane_bridge.py) + +## 6. Lane Lifecycle Invariants + +The lane model needs three additional invariants to survive real +human/agent collaboration. + +### 6.1 Handoff Uses Continuation Lanes + +Agent-to-agent relay should not let the target agent execute inside the source +unit's lane root. 
+ +Rules: + +- cross-unit shared working lanes are not the handoff model +- handoff creates a continuation lane under the target unit +- continuation lanes preserve source linkage, but give the target unit: + - its own lane root + - its own lease scope + - its own current-lane state + +Implication: + +- handoff preserves the unit-scoping invariant +- shared cross-unit lane execution does not + +### 6.2 Identity Rebinding Freezes And Continues + +When Premium recompiles an agent from one unit to another while live lanes +exist: + +- old lanes stay where they are +- old lanes become frozen +- active leases under the old unit are force-released +- old-unit exec planning is blocked +- resumption happens through continuation lanes under the new unit + +The minimal contract gr2 needs from Premium is: + +- same `agent_id` continuity +- explicit `old_owner_unit -> new_owner_unit` mapping +- `pending_reassignment` hint recommended + +Implication: + +- gr2 does not silently move or rename active lane roots +- rebind is a freeze-and-relay flow, not a mutation-in-place flow + +### 6.3 Workspace Constraints Are Enforced Locally + +Premium compiles workspace-wide constraints into the spec. gr2 enforces the +compiled result without importing org logic. + +The current prototype proves two critical cases: + +- `max_concurrent_edit_leases_global` + - enforced across all units in the workspace + - a third edit lease is blocked once the workspace cap is reached + - force-breaking a stale local lease does not bypass the global cap +- `required_reviewers` + - evaluated per repo and PR from review-lane state + - `check-review-requirements` reports satisfied vs unsatisfied based on the + compiled count + +Implication: + +- workspace-wide coordination rules can remain Premium-owned in definition +- OSS can still enforce the compiled constraint deterministically + +## 7. Premium Boundary Rules + +These rules should remain hard. 
+ +### Must Stay In Premium + +- persistent agent identity +- org membership +- role evaluation +- entitlement evaluation +- workspace assignment +- reassignment history +- policy compilation +- reviewer policy semantics +- degradation policy for loss of premium + +### Can Live In OSS + +- workspace-scoped unit records +- lane metadata +- lease enforcement +- workspace-wide constraint enforcement from compiled spec +- review-requirement satisfaction checks from compiled spec +- lane events +- event indexing +- channel outbox derivation +- local execution planning +- local materialization of compiled constraints + +### Must Not Happen + +- `gr2` must not decide who an agent is +- `gr2` must not resolve org role semantics +- `gr2` must not invent workspace policy not present in the compiled spec +- recall must not require Premium logic to answer lane-history queries +- channel bridge must not depend on synchronous control-plane availability + +## Prototype References + +Living examples for this integration layer: + +- [identity_unit_binding.py](/Users/layne/Development/synapt-codex/atlas-gr2-identity-org/gr2/prototypes/identity_unit_binding.py) +- [org_policy_compiler.py](/Users/layne/Development/synapt-codex/atlas-gr2-identity-org/gr2/prototypes/org_policy_compiler.py) +- [recall_lane_history.py](/Users/layne/Development/synapt-codex/atlas-gr2-identity-org/gr2/prototypes/recall_lane_history.py) +- [lane_workspace_prototype.py](/Users/layne/Development/synapt-codex/atlas-gr2-identity-org/gr2/prototypes/lane_workspace_prototype.py) +- [cross_mode_lane_stress.py](/Users/layne/Development/synapt-codex/atlas-gr2-identity-org/gr2/prototypes/cross_mode_lane_stress.py) +- [channel_lane_bridge.py](/Users/layne/Development/synapt-codex/atlas-gr2-playground-stack/gr2/prototypes/channel_lane_bridge.py) + +These prototypes are still part of the loop: + +- `(design -> prototype -> verify)^n` +- `build` +- `assess` +- `repeat` + +They should remain the seam-definition reference 
until the production +implementation lands. diff --git a/gr2/prototypes/README.md b/gr2/prototypes/README.md index 8e8fb5d..00b6a09 100644 --- a/gr2/prototypes/README.md +++ b/gr2/prototypes/README.md @@ -255,6 +255,121 @@ This is still prototype scope, but it tests the right product direction: lane transitions and lease changes should be observable workspace events rather than invisible local state. +Agent handoff example: + +```bash +python3 gr2/prototypes/lane_workspace_prototype.py share-lane \ + /path/to/workspace atlas feat-router apollo + +python3 gr2/prototypes/lane_workspace_prototype.py plan-handoff \ + /path/to/workspace atlas feat-router apollo --mode shared --json + +python3 gr2/prototypes/lane_workspace_prototype.py create-continuation-lane \ + /path/to/workspace atlas feat-router apollo feat-router-relay + +python3 gr2/prototypes/lane_workspace_prototype.py plan-handoff \ + /path/to/workspace atlas feat-router apollo --mode continuation \ + --target-lane-name feat-router-relay --json +``` + +Current prototype conclusion: + +- cross-unit shared-lane relay violates the unit-scoping invariant +- continuation lanes preserve unit-scoped cwd, lease scope, and current-lane state +- handoff should preserve lineage to the source lane without forcing the target + unit to execute inside the source unit's lane root + +Identity rebind example: + +```bash +python3 gr2/prototypes/lane_workspace_prototype.py rebind-unit \ + /path/to/workspace synapt-core release-control --actor premium:control-plane --json +``` + +Current prototype conclusion: + +- active lanes under the old unit stay in place and become frozen +- active leases are force-released and logged during the rebind +- old-unit exec planning is blocked after rebind +- recovery should happen through continuation lanes under the new unit +- the minimal safe contract from premium is: + - same `agent_id` continuity + - explicit old -> new unit mapping + - pending-reassignment hint is recommended to reduce 
operator surprise + +Identity -> unit binding example: + +```bash +python3 gr2/prototypes/identity_unit_binding.py demo +python3 gr2/prototypes/identity_unit_binding.py resolve-binding ws_synapt_core opus --json +python3 gr2/prototypes/identity_unit_binding.py compile-workspace ws_synapt_core --json +``` + +This prototype keeps the premium boundary hard: + +- Premium owns persistent agent identity, org membership, and workspace assignment. +- gr2 only consumes the compiled workspace-scoped unit view. +- the same persistent agent can map to different `owner_unit` names in different + workspaces without gr2 learning org logic. +- reassignment is a premium recompilation event, not a gr2-side identity + decision. + +Org/policy compiler example: + +```bash +python3 gr2/prototypes/org_policy_compiler.py demo +python3 gr2/prototypes/org_policy_compiler.py compile --scenario baseline --json +python3 gr2/prototypes/org_policy_compiler.py compile --scenario repo-update --json +python3 gr2/prototypes/org_policy_compiler.py compile --scenario downgrade --json +``` + +This prototype keeps the compiler seam explicit: + +- Premium reads org config, roles, entitlements, and reviewer policy. +- Premium outputs workspace-scoped constraints that gr2 can enforce locally. +- gr2 sees unit repo access, lane limits, and workspace constraints, but not the + raw org policy logic that produced them. 
+ +Workspace constraint enforcement example: + +```bash +python3 gr2/prototypes/lane_workspace_prototype.py acquire-lane-lease \ + /path/to/workspace atlas feat-a --actor agent:atlas --mode edit --json + +python3 gr2/prototypes/lane_workspace_prototype.py check-review-requirements \ + /path/to/workspace premium 777 --json +``` + +Current prototype conclusion: + +- `max_concurrent_edit_leases_global` is enforced across all units, not just + the requesting unit +- a stale local lease can be force-broken, but that does not bypass the + workspace-wide edit cap +- `required_reviewers` is evaluated from compiled workspace constraints plus + review-lane state +- OSS enforces the compiled constraint, but Premium still owns the policy that + produced it + +Recall lane history example: + +```bash +python3 gr2/prototypes/recall_lane_history.py demo-data /tmp/gr2-recall-demo +python3 gr2/prototypes/recall_lane_history.py query /tmp/gr2-recall-demo --lane auth-refactor --json +python3 gr2/prototypes/recall_lane_history.py query /tmp/gr2-recall-demo --actor agent:atlas --json +python3 gr2/prototypes/recall_lane_history.py query /tmp/gr2-recall-demo --repo grip --json +``` + +This prototype indexes lane events into a neutral recall-friendly timeline: + +- by lane +- by actor +- by repo +- by time range + +Recall can answer lane-history questions from structured workspace events +without importing premium identity or org semantics. 
+ ## Real-Git Same-Repo Multi-Agent Materialization To verify that unit-local-first is real and not only metadata, run: diff --git a/gr2/prototypes/cross_mode_lane_stress.py b/gr2/prototypes/cross_mode_lane_stress.py index e66cea5..eaa8774 100644 --- a/gr2/prototypes/cross_mode_lane_stress.py +++ b/gr2/prototypes/cross_mode_lane_stress.py @@ -92,23 +92,47 @@ def init_workspace(workspace_root: Path) -> None: path = "repos/web" url = "https://example.invalid/web.git" +[[repos]] +name = "premium" +path = "repos/premium" +url = "https://example.invalid/premium.git" + +[workspace_constraints] +max_concurrent_edit_leases_global = 2 + +[workspace_constraints.required_reviewers] +premium = 2 +app = 1 + [[units]] name = "atlas" path = "agents/atlas" agent_id = "atlas-agent" -repos = ["app", "api", "web"] +repos = ["app", "api", "web", "premium"] [[units]] name = "apollo" path = "agents/apollo" agent_id = "apollo-agent" -repos = ["app", "api", "web"] +repos = ["app", "api", "web", "premium"] [[units]] name = "layne" path = "agents/layne" agent_id = "layne-human" -repos = ["app", "api", "web"] +repos = ["app", "api", "web", "premium"] + +[[units]] +name = "synapt-core" +path = "agents/synapt-core" +agent_id = "agent_opus_abc123" +repos = ["app", "api", "web", "premium"] + +[[units]] +name = "release-control" +path = "agents/release-control" +agent_id = "agent_opus_abc123" +repos = ["app", "api", "web", "premium"] """ (workspace_root / ".grip" / "workspace_spec.toml").write_text(spec) @@ -202,6 +226,22 @@ def show_leases_json(root: Path, workspace_root: Path, owner_unit: str, lane_nam return json.loads(proc.stdout) +def check_review_requirements_json(root: Path, workspace_root: Path, repo: str, pr_number: int) -> dict: + proc = run( + [ + "python3", + str(lane_proto(root)), + "check-review-requirements", + str(workspace_root), + repo, + str(pr_number), + "--json", + ], + capture=True, + ) + return json.loads(proc.stdout) + + def list_lanes_text(root: Path, workspace_root: 
Path, owner_unit: str | None = None) -> str: argv = [ "python3", @@ -215,6 +255,33 @@ def list_lanes_text(root: Path, workspace_root: Path, owner_unit: str | None = N return proc.stdout +def plan_handoff_json( + root: Path, + workspace_root: Path, + source_owner_unit: str, + source_lane_name: str, + target_unit: str, + mode: str, + target_lane_name: str | None = None, +) -> dict: + argv = [ + "python3", + str(lane_proto(root)), + "plan-handoff", + str(workspace_root), + source_owner_unit, + source_lane_name, + target_unit, + "--mode", + mode, + "--json", + ] + if target_lane_name: + argv.extend(["--target-lane-name", target_lane_name]) + proc = run(argv, capture=True) + return json.loads(proc.stdout) + + def scenario_multi_agent_same_repo(root: Path, workspace_root: Path) -> ScenarioResult: create_lane(root, workspace_root, "atlas", "feat-router", "app", "feat/router") create_lane(root, workspace_root, "apollo", "feat-materialize", "app", "feat/materialize") @@ -253,6 +320,91 @@ def scenario_multi_agent_same_repo(root: Path, workspace_root: Path) -> Scenario ) +def scenario_agent_handoff_relay(root: Path, workspace_root: Path) -> ScenarioResult: + create_lane(root, workspace_root, "atlas", "feat-router", "app,api", "feat/router") + run( + [ + "python3", + str(lane_proto(root)), + "share-lane", + str(workspace_root), + "atlas", + "feat-router", + "apollo", + ] + ) + shared_plan = plan_handoff_json( + root, + workspace_root, + "atlas", + "feat-router", + "apollo", + "shared", + ) + run( + [ + "python3", + str(lane_proto(root)), + "create-continuation-lane", + str(workspace_root), + "atlas", + "feat-router", + "apollo", + "feat-router-relay", + ] + ) + continuation_plan = plan_handoff_json( + root, + workspace_root, + "atlas", + "feat-router", + "apollo", + "continuation", + "feat-router-relay", + ) + + holds = [] + gaps = [] + evidence = [ + json.dumps(shared_plan, indent=2), + json.dumps(continuation_plan, indent=2), + ] + + if not 
shared_plan["invariant_assessment"]["unit_scoped"]: + holds.append("cross-unit shared-lane relay exposes the unit-scoping violation directly") + else: + gaps.append("shared-lane relay incorrectly appears unit-scoped") + + shared_cwds = {row["cwd"] for row in shared_plan["exec_rows"]} + if all("/agents/atlas/lanes/feat-router/" in cwd for cwd in shared_cwds): + holds.append("shared-lane relay forces the target unit to execute inside the source unit lane root") + else: + gaps.append("shared-lane relay did not clearly surface source-unit cwd ownership") + + if continuation_plan["invariant_assessment"]["unit_scoped"]: + holds.append("continuation lane preserves unit-scoped cwd and lease ownership") + else: + gaps.append("continuation lane did not preserve unit scoping") + + continuation_cwds = {row["cwd"] for row in continuation_plan["exec_rows"]} + if all("/agents/apollo/lanes/feat-router-relay/" in cwd for cwd in continuation_cwds): + holds.append("continuation lane gives the target unit an independent lane root") + verdict = "holds" + else: + gaps.append("continuation lane did not create target-unit-local execution roots") + verdict = "fails" + + return ScenarioResult( + scenario_id="agent-handoff-relay", + user_mode="multi-agent", + title="agent-to-agent lane handoff prefers continuation over cross-unit shared lanes", + verdict=verdict, + holds=holds, + gaps=gaps, + evidence=evidence, + ) + + def scenario_mixed_same_lane_exec(root: Path, workspace_root: Path) -> ScenarioResult: create_lane(root, workspace_root, "layne", "feat-blog", "app", "feat/blog") acquire_lease(root, workspace_root, "layne", "feat-blog", "human:layne", "edit") @@ -635,6 +787,252 @@ def scenario_solo_human_forgets_lane(root: Path, workspace_root: Path) -> Scenar ) +def scenario_identity_rebind_live_lanes(root: Path, workspace_root: Path) -> ScenarioResult: + create_lane(root, workspace_root, "synapt-core", "feat-auth", "app,api", "feat/auth") + create_lane(root, workspace_root, "synapt-core", 
"feat-deploy", "web", "feat/deploy") + acquire_lease(root, workspace_root, "synapt-core", "feat-auth", "agent:opus", "edit") + acquire_lease(root, workspace_root, "synapt-core", "feat-deploy", "agent:opus", "edit") + run( + [ + "python3", + str(lane_proto(root)), + "enter-lane", + str(workspace_root), + "synapt-core", + "feat-auth", + "--actor", + "agent:opus", + ] + ) + rebind_active = run( + [ + "python3", + str(lane_proto(root)), + "rebind-unit", + str(workspace_root), + "synapt-core", + "release-control", + "--actor", + "premium:control-plane", + "--json", + ], + capture=True, + ) + rebind_active_doc = json.loads(rebind_active.stdout) + blocked_old = plan_exec_json(root, workspace_root, "synapt-core", "feat-auth", "cargo test") + continuation = plan_handoff_json( + root, + workspace_root, + "synapt-core", + "feat-auth", + "release-control", + "continuation", + "feat-auth-relay", + ) + history_proc = run( + [ + "python3", + str(lane_proto(root)), + "lane-history", + str(workspace_root), + "synapt-core", + "--json", + ], + capture=True, + ) + history_rows = json.loads(history_proc.stdout) + + clean_workspace = workspace_root / "clean-rebind" + clean_workspace.mkdir(parents=True, exist_ok=True) + init_workspace(clean_workspace) + create_lane(root, clean_workspace, "synapt-core", "feat-clean", "app", "feat/clean") + rebind_clean = run( + [ + "python3", + str(lane_proto(root)), + "rebind-unit", + str(clean_workspace), + "synapt-core", + "release-control", + "--actor", + "premium:control-plane", + "--json", + ], + capture=True, + ) + rebind_clean_doc = json.loads(rebind_clean.stdout) + + holds = [] + gaps = [] + evidence = [ + json.dumps(rebind_active_doc, indent=2), + json.dumps(blocked_old, indent=2), + json.dumps(continuation, indent=2), + json.dumps(history_rows, indent=2), + json.dumps(rebind_clean_doc, indent=2), + ] + + if all(item["status"] == "frozen" for item in rebind_active_doc["affected_lanes"]): + holds.append("active lanes stay in the old unit and 
become frozen rather than moving silently") + else: + gaps.append("rebind did not freeze old lanes deterministically") + + if len(rebind_active_doc["expired_leases"]) == 2: + holds.append("active edit leases are force-released during rebind") + else: + gaps.append("active leases were not force-released during rebind") + + if isinstance(blocked_old, dict) and blocked_old.get("reason") == "unit-rebound": + holds.append("old unit lanes are blocked for further exec planning after rebind") + else: + gaps.append("old unit lanes were not blocked after rebind") + + if continuation["invariant_assessment"]["unit_scoped"]: + holds.append("post-rebind recovery path is continuation under the new unit") + else: + gaps.append("rebind recovery path did not preserve unit scoping") + + if any(row["type"] == "unit_rebind" for row in history_rows): + holds.append("lane event history records the unit rebind for recall reconstruction") + else: + gaps.append("lane history did not record the rebind transition") + + contract = rebind_active_doc.get("required_contract", {}) + if contract.get("same_agent_id") and contract.get("old_to_new_mapping"): + holds.append("prototype identifies same-agent-id continuity and explicit old->new mapping as required contract") + else: + gaps.append("rebind contract requirements are not explicit enough") + + if rebind_clean_doc["expired_leases"] == []: + holds.append("clean rebind with no active leases avoids unnecessary lease churn") + else: + gaps.append("clean rebind unexpectedly expired leases") + + verdict = "holds" if not gaps else "fails" + return ScenarioResult( + scenario_id="identity-rebind-live-lanes", + user_mode="single-agent", + title="identity rebinding freezes old lanes and resumes through continuation under the new unit", + verdict=verdict, + holds=holds, + gaps=gaps, + evidence=evidence, + ) + + +def scenario_global_edit_lease_cap(root: Path, workspace_root: Path) -> ScenarioResult: + create_lane(root, workspace_root, "atlas", "feat-cap-a", 
"app", "feat/cap-a") + create_lane(root, workspace_root, "apollo", "feat-cap-b", "api", "feat/cap-b") + create_lane(root, workspace_root, "layne", "feat-cap-c", "web", "feat/cap-c") + + create_lane(root, workspace_root, "release-control", "feat-cap-stale", "premium", "feat/cap-stale") + acquire_lease(root, workspace_root, "release-control", "feat-cap-stale", "agent:opus", "edit", ttl_seconds=0) + + lease_a = acquire_lease(root, workspace_root, "atlas", "feat-cap-a", "agent:atlas", "edit") + lease_b = acquire_lease(root, workspace_root, "apollo", "feat-cap-b", "agent:apollo", "edit") + lease_c = acquire_lease( + root, workspace_root, "layne", "feat-cap-c", "human:layne", "edit", expect_ok=False + ) + + stale_force = acquire_lease( + root, + workspace_root, + "release-control", + "feat-cap-stale", + "agent:opus", + "edit", + ttl_seconds=900, + force=True, + expect_ok=False, + ) + + holds = [] + gaps = [] + evidence = [lease_c.stdout.strip(), stale_force.stdout.strip()] + + if lease_a.returncode == 0 and lease_b.returncode == 0 and lease_c.returncode != 0: + holds.append("third edit lease is blocked when global cap of 2 is reached") + else: + gaps.append("global edit lease cap did not block the third concurrent edit lease") + + if stale_force.returncode != 0 and "workspace-edit-lease-cap" in stale_force.stdout: + holds.append("force-breaking a stale local lease does not bypass the workspace edit cap") + else: + gaps.append("stale force-break bypassed the workspace edit lease cap") + + # Release leases so later scenarios aren't blocked by the global cap + for unit, lane, actor in [ + ("atlas", "feat-cap-a", "agent:atlas"), + ("apollo", "feat-cap-b", "agent:apollo"), + ]: + run( + [ + "python3", + str(lane_proto(root)), + "release-lane-lease", + str(workspace_root), + unit, + lane, + "--actor", + actor, + ], + capture=True, + ) + + verdict = "holds" if not gaps else "fails" + return ScenarioResult( + scenario_id="global-edit-lease-cap", + user_mode="cross-mode", + 
title="workspace-wide edit lease cap is enforced across all units", + verdict=verdict, + holds=holds, + gaps=gaps, + evidence=evidence, + ) + + +def scenario_required_reviewers(root: Path, workspace_root: Path) -> ScenarioResult: + zero = check_review_requirements_json(root, workspace_root, "premium", 777) + create_review_lane(root, workspace_root, "atlas", "premium", 777) + one = check_review_requirements_json(root, workspace_root, "premium", 777) + create_review_lane(root, workspace_root, "apollo", "premium", 777) + two = check_review_requirements_json(root, workspace_root, "premium", 777) + + holds = [] + gaps = [] + evidence = [ + json.dumps(zero, indent=2), + json.dumps(one, indent=2), + json.dumps(two, indent=2), + ] + + if zero["required_reviewers"] == 2 and zero["actual_reviewers"] == 0 and not zero["satisfied"]: + holds.append("review requirements report unsatisfied with zero review lanes") + else: + gaps.append("zero-reviewer requirement state is incorrect") + + if one["actual_reviewers"] == 1 and not one["satisfied"]: + holds.append("one review lane is still unsatisfied when premium requires two reviewers") + else: + gaps.append("single reviewer state is incorrect") + + if two["actual_reviewers"] == 2 and two["satisfied"]: + holds.append("two review lanes satisfy the premium repo review requirement") + else: + gaps.append("two reviewers did not satisfy the premium repo requirement") + + verdict = "holds" if not gaps else "fails" + return ScenarioResult( + scenario_id="required-reviewers", + user_mode="cross-mode", + title="required reviewer counts are enforced from workspace constraints", + verdict=verdict, + holds=holds, + gaps=gaps, + evidence=evidence, + ) + + def run_scenarios(workspace_root: Path) -> list[ScenarioResult]: root = repo_root() init_workspace(workspace_root) @@ -643,6 +1041,10 @@ def run_scenarios(workspace_root: Path) -> list[ScenarioResult]: scenario_lease_conflict_matrix(root, workspace_root), scenario_stale_lease_force_break(root, 
workspace_root), scenario_multi_agent_same_repo(root, workspace_root), + scenario_agent_handoff_relay(root, workspace_root), + scenario_identity_rebind_live_lanes(root, workspace_root), + scenario_global_edit_lease_cap(root, workspace_root), + scenario_required_reviewers(root, workspace_root), scenario_mixed_same_lane_exec(root, workspace_root), scenario_single_agent_interrupt_recovery(root, workspace_root), scenario_solo_human_forgets_lane(root, workspace_root), diff --git a/gr2/prototypes/identity_unit_binding.py b/gr2/prototypes/identity_unit_binding.py new file mode 100644 index 0000000..80cb7d9 --- /dev/null +++ b/gr2/prototypes/identity_unit_binding.py @@ -0,0 +1,336 @@ +#!/usr/bin/env python3 +"""Prototype premium-owned identity -> gr2 unit binding. + +This models the integration seam where Premium owns durable agent identity and +workspace assignment, while gr2 consumes only the compiled workspace-scoped +unit view. + +Key rule: +- Premium resolves persistent identity and org membership. +- gr2 only materializes units and lane policy from the compiled spec. 
+""" + +from __future__ import annotations + +import argparse +import dataclasses +import json +from typing import Any + + +@dataclasses.dataclass +class AgentIdentity: + handle: str + persistent_id: str + kind: str + + +@dataclasses.dataclass +class WorkspaceAssignment: + workspace_id: str + workspace_name: str + owner_unit: str + unit_path: str + repo_access: list[str] + lane_limit: int + role: str + active: bool = True + + +@dataclasses.dataclass +class PremiumAgentRecord: + identity: AgentIdentity + assignments: list[WorkspaceAssignment] + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Prototype premium identity -> gr2 unit binding" + ) + sub = parser.add_subparsers(dest="command", required=True) + + demo = sub.add_parser("demo") + demo.add_argument("--json", action="store_true") + + resolve_cmd = sub.add_parser("resolve-binding") + resolve_cmd.add_argument("workspace_id") + resolve_cmd.add_argument("handle") + resolve_cmd.add_argument("--json", action="store_true") + + compile_cmd = sub.add_parser("compile-workspace") + compile_cmd.add_argument("workspace_id") + compile_cmd.add_argument( + "--scenario", + choices=["baseline", "reassigned"], + default="baseline", + ) + compile_cmd.add_argument("--json", action="store_true") + + return parser.parse_args() + + +def sample_org_state() -> dict[str, PremiumAgentRecord]: + return { + "opus": PremiumAgentRecord( + identity=AgentIdentity( + handle="opus", + persistent_id="agent_opus_abc123", + kind="agent", + ), + assignments=[ + WorkspaceAssignment( + workspace_id="ws_synapt_core", + workspace_name="synapt-core", + owner_unit="synapt-core", + unit_path="agents/synapt-core", + repo_access=["grip", "premium", "recall"], + lane_limit=2, + role="core-agent", + ), + WorkspaceAssignment( + workspace_id="ws_blog", + workspace_name="blog-studio", + owner_unit="editorial-opus", + unit_path="agents/editorial-opus", + repo_access=["blog", "marketing-site"], + lane_limit=1, + 
role="editorial-agent", + ), + ], + ), + "apollo": PremiumAgentRecord( + identity=AgentIdentity( + handle="apollo", + persistent_id="agent_apollo_def456", + kind="agent", + ), + assignments=[ + WorkspaceAssignment( + workspace_id="ws_synapt_core", + workspace_name="synapt-core", + owner_unit="materialization", + unit_path="agents/materialization", + repo_access=["grip", "premium"], + lane_limit=2, + role="build-agent", + ) + ], + ), + "atlas": PremiumAgentRecord( + identity=AgentIdentity( + handle="atlas", + persistent_id="agent_atlas_ghi789", + kind="agent", + ), + assignments=[ + WorkspaceAssignment( + workspace_id="ws_synapt_core", + workspace_name="synapt-core", + owner_unit="design-research", + unit_path="agents/design-research", + repo_access=["grip", "premium", "recall", "config"], + lane_limit=3, + role="design-agent", + ) + ], + ), + } + + +def reassigned_org_state() -> dict[str, PremiumAgentRecord]: + state = sample_org_state() + opus = state["opus"] + retained = [ + assignment + for assignment in opus.assignments + if assignment.workspace_id != "ws_synapt_core" + ] + opus.assignments = [ + WorkspaceAssignment( + workspace_id="ws_synapt_core", + workspace_name="synapt-core", + owner_unit="release-control", + unit_path="agents/release-control", + repo_access=["grip", "premium", "recall"], + lane_limit=2, + role="release-agent", + ), + *retained, + ] + return state + + +def resolve_binding(state: dict[str, PremiumAgentRecord], workspace_id: str, handle: str) -> dict[str, Any]: + record = state.get(handle) + if not record: + raise SystemExit(f"unknown agent handle: {handle}") + for assignment in record.assignments: + if assignment.workspace_id == workspace_id and assignment.active: + return { + "premium_identity": { + "handle": record.identity.handle, + "persistent_id": record.identity.persistent_id, + "kind": record.identity.kind, + }, + "workspace_binding": dataclasses.asdict(assignment), + "gr2_view": { + "owner_unit": assignment.owner_unit, + 
"unit_path": assignment.unit_path, + "repo_access": assignment.repo_access, + "lane_limit": assignment.lane_limit, + "agent_id": record.identity.persistent_id, + }, + } + raise SystemExit(f"no active assignment for {handle} in workspace {workspace_id}") + + +def compile_workspace_spec_view(state: dict[str, PremiumAgentRecord], workspace_id: str) -> dict[str, Any]: + units: list[dict[str, Any]] = [] + premium_bindings: list[dict[str, Any]] = [] + for record in state.values(): + for assignment in record.assignments: + if assignment.workspace_id != workspace_id or not assignment.active: + continue + premium_bindings.append( + { + "handle": record.identity.handle, + "persistent_id": record.identity.persistent_id, + "workspace_id": assignment.workspace_id, + "owner_unit": assignment.owner_unit, + "role": assignment.role, + } + ) + units.append( + { + "name": assignment.owner_unit, + "path": assignment.unit_path, + "agent_id": record.identity.persistent_id, + "repos": assignment.repo_access, + "policy": { + "lane_limit": assignment.lane_limit, + "role": assignment.role, + }, + } + ) + return { + "premium_knows": { + "workspace_id": workspace_id, + "bindings": premium_bindings, + "notes": [ + "persistent identity is resolved in premium", + "premium owns reassignment and org membership", + "premium can compile different owner_unit names per workspace for the same agent", + ], + }, + "gr2_sees": { + "workspace_spec_fragment": { + "workspace_id": workspace_id, + "units": units, + }, + "notes": [ + "gr2 consumes workspace-scoped units only", + "agent_id is an opaque identifier for attribution, not org resolution logic", + "gr2 does not decide who an agent is or where else it is assigned", + ], + }, + } + + +def demo_payload() -> dict[str, Any]: + baseline = sample_org_state() + reassigned = reassigned_org_state() + return { + "design_rule": { + "premium_owns": [ + "persistent agent identity", + "org membership", + "workspace assignment", + "reassignment history", + ], + 
"gr2_owns": [ + "workspace-scoped unit directories", + "lane and lease enforcement", + "local execution surfaces", + ], + }, + "same_agent_two_workspaces": { + "synapt_core": resolve_binding(baseline, "ws_synapt_core", "opus"), + "blog_workspace": resolve_binding(baseline, "ws_blog", "opus"), + "explanation": "one persistent agent can bind to different owner_unit names in different workspaces", + }, + "org_reassignment": { + "before": resolve_binding(baseline, "ws_synapt_core", "opus"), + "after": resolve_binding(reassigned, "ws_synapt_core", "opus"), + "explanation": "premium changes the binding and recompiles the workspace view; gr2 does not infer reassignment itself", + }, + "compiled_workspace_view": compile_workspace_spec_view(baseline, "ws_synapt_core"), + } + + +def print_human(payload: dict[str, Any]) -> None: + print("gr2 identity -> unit binding prototype") + print() + print("Premium owns:") + for item in payload["design_rule"]["premium_owns"]: + print(f"- {item}") + print("gr2 owns:") + for item in payload["design_rule"]["gr2_owns"]: + print(f"- {item}") + print() + same = payload["same_agent_two_workspaces"] + print("Same agent across two workspaces") + print( + f"- opus in synapt-core -> {same['synapt_core']['workspace_binding']['owner_unit']}" + ) + print( + f"- opus in blog-studio -> {same['blog_workspace']['workspace_binding']['owner_unit']}" + ) + print(f"- {same['explanation']}") + print() + reassignment = payload["org_reassignment"] + print("Org reassignment") + print( + f"- before: {reassignment['before']['workspace_binding']['owner_unit']}" + ) + print( + f"- after: {reassignment['after']['workspace_binding']['owner_unit']}" + ) + print(f"- {reassignment['explanation']}") + print() + compiled = payload["compiled_workspace_view"] + print("Compiled workspace fragment gr2 sees") + for unit in compiled["gr2_sees"]["workspace_spec_fragment"]["units"]: + print( + f"- unit={unit['name']} agent_id={unit['agent_id']} repos={','.join(unit['repos'])} 
lane_limit={unit['policy']['lane_limit']}" + ) + + +def main() -> int: + args = parse_args() + if args.command == "demo": + payload = demo_payload() + if args.json: + print(json.dumps(payload, indent=2)) + else: + print_human(payload) + return 0 + if args.command == "resolve-binding": + payload = resolve_binding(sample_org_state(), args.workspace_id, args.handle) + if args.json: + print(json.dumps(payload, indent=2)) + else: + print(json.dumps(payload, indent=2)) + return 0 + if args.command == "compile-workspace": + state = sample_org_state() if args.scenario == "baseline" else reassigned_org_state() + payload = compile_workspace_spec_view(state, args.workspace_id) + if args.json: + print(json.dumps(payload, indent=2)) + else: + print(json.dumps(payload, indent=2)) + return 0 + raise SystemExit(f"unknown command: {args.command}") + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/gr2/prototypes/lane_workspace_prototype.py b/gr2/prototypes/lane_workspace_prototype.py index 678924e..5374926 100644 --- a/gr2/prototypes/lane_workspace_prototype.py +++ b/gr2/prototypes/lane_workspace_prototype.py @@ -43,6 +43,8 @@ class LaneMetadata: private_context_roots: list[str] exec_defaults: dict[str, object] creation_source: str + shared_with: list[str] + handoff_source: dict[str, str] | None def as_toml(self) -> str: lines = [ @@ -54,6 +56,7 @@ def as_toml(self) -> str: f'creation_source = "{self.creation_source}"', "", f"repos = [{', '.join(f'\"{r}\"' for r in self.repos)}]", + f"shared_with = [{', '.join(f'\"{u}\"' for u in self.shared_with)}]", "", "[branch_map]", ] @@ -82,6 +85,17 @@ def as_toml(self) -> str: for assoc in self.pr_associations: lines.extend(["", "[[pr_associations]]", f'ref = "{assoc}"']) + if self.handoff_source: + lines.extend( + [ + "", + "[handoff]", + f'kind = "{self.handoff_source["kind"]}"', + f'source_owner_unit = "{self.handoff_source["source_owner_unit"]}"', + f'source_lane = "{self.handoff_source["source_lane"]}"', + ] + ) + 
return "\n".join(lines) + "\n" @@ -157,6 +171,28 @@ def parse_args() -> argparse.Namespace: review.add_argument("--lane-name") review.add_argument("--branch") + share_lane = sub.add_parser("share-lane") + share_lane.add_argument("workspace_root", type=Path) + share_lane.add_argument("owner_unit") + share_lane.add_argument("lane_name") + share_lane.add_argument("target_unit") + + continuation = sub.add_parser("create-continuation-lane") + continuation.add_argument("workspace_root", type=Path) + continuation.add_argument("source_owner_unit") + continuation.add_argument("source_lane_name") + continuation.add_argument("target_unit") + continuation.add_argument("target_lane_name") + + handoff = sub.add_parser("plan-handoff") + handoff.add_argument("workspace_root", type=Path) + handoff.add_argument("source_owner_unit") + handoff.add_argument("source_lane_name") + handoff.add_argument("target_unit") + handoff.add_argument("--mode", choices=["shared", "continuation"], required=True) + handoff.add_argument("--target-lane-name") + handoff.add_argument("--json", action="store_true") + show = sub.add_parser("show-lane") show.add_argument("workspace_root", type=Path) show.add_argument("owner_unit") @@ -204,6 +240,13 @@ def parse_args() -> argparse.Namespace: history.add_argument("owner_unit") history.add_argument("--json", action="store_true") + rebind = sub.add_parser("rebind-unit") + rebind.add_argument("workspace_root", type=Path) + rebind.add_argument("old_owner_unit") + rebind.add_argument("new_owner_unit") + rebind.add_argument("--actor", required=True) + rebind.add_argument("--json", action="store_true") + lease = sub.add_parser("acquire-lane-lease") lease.add_argument("workspace_root", type=Path) lease.add_argument("owner_unit") @@ -274,6 +317,12 @@ def parse_args() -> argparse.Namespace: recommend.add_argument("--repos", type=int, default=1) recommend.add_argument("--shared-draft", action="store_true") + review_check = sub.add_parser("check-review-requirements") + 
review_check.add_argument("workspace_root", type=Path) + review_check.add_argument("repo") + review_check.add_argument("pr_number", type=int) + review_check.add_argument("--json", action="store_true") + return parser.parse_args() @@ -305,6 +354,21 @@ def lane_leases_lock_file(workspace_root: Path, owner_unit: str, lane_name: str) return lane_dir(workspace_root, owner_unit, lane_name) / "leases.lock" +def shared_lane_access_file(workspace_root: Path, owner_unit: str, lane_name: str) -> Path: + return ( + workspace_root + / ".grip" + / "state" + / "shared_lane_access" + / owner_unit + / f"{lane_name}.json" + ) + + +def unit_rebind_file(workspace_root: Path, owner_unit: str) -> Path: + return workspace_root / ".grip" / "state" / "rebindings" / f"{owner_unit}.json" + + def events_dir(workspace_root: Path) -> Path: return workspace_root / ".grip" / "events" @@ -351,6 +415,13 @@ def load_current_lane_doc(workspace_root: Path, owner_unit: str) -> dict: return json.loads(path.read_text()) +def load_unit_rebind_doc(workspace_root: Path, owner_unit: str) -> dict | None: + path = unit_rebind_file(workspace_root, owner_unit) + if not path.exists(): + return None + return json.loads(path.read_text()) + + def append_jsonl(path: Path, payload: dict) -> None: path.parent.mkdir(parents=True, exist_ok=True) with path.open("a", encoding="utf-8") as fh: @@ -378,6 +449,11 @@ def iter_lane_events(workspace_root: Path) -> list[dict]: return items +def write_json(path: Path, payload: dict) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(payload, indent=2) + "\n") + + def load_lane_leases(workspace_root: Path, owner_unit: str, lane_name: str) -> list[dict]: path = lane_leases_file(workspace_root, owner_unit, lane_name) if not path.exists(): @@ -385,6 +461,37 @@ def load_lane_leases(workspace_root: Path, owner_unit: str, lane_name: str) -> l return json.loads(path.read_text()) +def workspace_constraints(workspace_root: Path) -> dict: + spec = 
load_workspace_spec(workspace_root) + return spec.get("workspace_constraints", {}) + + +def max_global_edit_leases(workspace_root: Path) -> int | None: + value = workspace_constraints(workspace_root).get("max_concurrent_edit_leases_global") + if value is None: + return None + return int(value) + + +def active_workspace_edit_leases(workspace_root: Path) -> list[dict]: + rows: list[dict] = [] + for path in iter_lane_files(workspace_root): + lane_doc = tomllib.loads(path.read_text()) + owner_unit = lane_doc["owner_unit"] + lane_name = lane_doc["lane_name"] + for lease in load_lane_leases(workspace_root, owner_unit, lane_name): + if lease["mode"] != "edit" or is_stale_lease(lease): + continue + rows.append( + { + "owner_unit": owner_unit, + "lane_name": lane_name, + **lease, + } + ) + return rows + + def lease_locking_enabled() -> bool: return os.environ.get("GR2_DISABLE_LEASE_LOCKING") != "1" @@ -596,6 +703,8 @@ def create_lane(args: argparse.Namespace) -> int: "commands": args.default_commands, }, creation_source=args.source, + shared_with=[], + handoff_source=None, ) lane_file(workspace_root, args.owner_unit, args.lane_name).write_text(metadata.as_toml()) print(lane_file(workspace_root, args.owner_unit, args.lane_name)) @@ -757,9 +866,141 @@ def lane_history(args: argparse.Namespace) -> int: return 0 +def rebind_unit(args: argparse.Namespace) -> int: + workspace_root = args.workspace_root.resolve() + old_unit = find_unit_spec(workspace_root, args.old_owner_unit) + new_unit = find_unit_spec(workspace_root, args.new_owner_unit) + old_agent_id = old_unit.get("agent_id") + new_agent_id = new_unit.get("agent_id") + if not old_agent_id or not new_agent_id or old_agent_id != new_agent_id: + raise SystemExit("rebind-unit requires old and new units with the same agent_id") + + lane_paths = iter_lane_files(workspace_root, args.old_owner_unit) + affected_lanes: list[dict] = [] + expired_leases: list[dict] = [] + for path in lane_paths: + lane_doc = 
tomllib.loads(path.read_text()) + lane_name = lane_doc["lane_name"] + leases = load_lane_leases(workspace_root, args.old_owner_unit, lane_name) + active_leases = [lease for lease in leases if not is_stale_lease(lease)] + if active_leases: + write_lane_leases(workspace_root, args.old_owner_unit, lane_name, []) + for lease in active_leases: + expired_leases.append( + { + "lane_name": lane_name, + "actor": lease["actor"], + "mode": lease["mode"], + "acquired_at": lease["acquired_at"], + "expires_at": lease.get("expires_at"), + } + ) + emit_lane_event( + workspace_root, + { + "type": "lease_release", + "agent": lease["actor"], + "agent_id": old_agent_id, + "owner_unit": args.old_owner_unit, + "lane": lane_name, + "lane_type": lane_doc["lane_type"], + "repos": lane_doc.get("repos", []), + "timestamp": now_utc(), + "reason": "unit_rebind", + }, + ) + + affected_lanes.append( + { + "lane_name": lane_name, + "lane_type": lane_doc["lane_type"], + "repos": lane_doc.get("repos", []), + "status": "frozen", + "expired_lease_count": len(active_leases), + } + ) + + current_lane = None + current_path = current_lane_file(workspace_root, args.old_owner_unit) + if current_path.exists(): + current_doc = json.loads(current_path.read_text()) + current_lane = current_doc.get("current") + updated = dict(current_doc) + updated["status"] = "rebound" + updated["rebound_to"] = args.new_owner_unit + write_json(current_path, updated) + + rebind_doc = { + "old_owner_unit": args.old_owner_unit, + "new_owner_unit": args.new_owner_unit, + "agent_id": old_agent_id, + "rebound_at": now_utc(), + "actor": args.actor, + "status": "complete", + "affected_lanes": affected_lanes, + "expired_leases": expired_leases, + "current_lane_at_rebind": current_lane, + "required_contract": { + "same_agent_id": True, + "old_to_new_mapping": True, + "pending_reassignment_hint": "recommended", + }, + } + write_json(unit_rebind_file(workspace_root, args.old_owner_unit), rebind_doc) + emit_lane_event( + workspace_root, + 
{ + "type": "unit_rebind", + "agent": args.actor, + "agent_id": old_agent_id, + "owner_unit": args.old_owner_unit, + "new_owner_unit": args.new_owner_unit, + "lane": current_lane["lane_name"] if current_lane else None, + "lane_type": current_lane["lane_type"] if current_lane else None, + "repos": sorted({repo for item in affected_lanes for repo in item["repos"]}), + "timestamp": rebind_doc["rebound_at"], + "frozen_lane_count": len(affected_lanes), + "expired_lease_count": len(expired_leases), + }, + ) + if args.json: + print(json.dumps(rebind_doc, indent=2)) + else: + print(json.dumps(rebind_doc, indent=2)) + return 0 + + def acquire_lane_lease(args: argparse.Namespace) -> int: workspace_root = args.workspace_root.resolve() load_lane_doc(workspace_root, args.owner_unit, args.lane_name) + if args.mode == "edit": + cap = max_global_edit_leases(workspace_root) + if cap is not None: + active_edits = active_workspace_edit_leases(workspace_root) + active_edits = [ + lease + for lease in active_edits + if not ( + lease["owner_unit"] == args.owner_unit + and lease["lane_name"] == args.lane_name + and lease["actor"] == args.actor + ) + ] + if len(active_edits) >= cap: + payload = { + "status": "blocked", + "reason": "workspace-edit-lease-cap", + "requested": { + "owner_unit": args.owner_unit, + "lane_name": args.lane_name, + "actor": args.actor, + "mode": args.mode, + }, + "active_edit_leases": active_edits, + "max_concurrent_edit_leases_global": cap, + } + print(json.dumps(payload, indent=2)) + return 1 result = mutate_lane_leases( workspace_root, args.owner_unit, @@ -911,6 +1152,170 @@ def create_review_lane(args: argparse.Namespace) -> int: return 0 +def check_review_requirements(args: argparse.Namespace) -> int: + workspace_root = args.workspace_root.resolve() + ref = f"{args.repo}#{args.pr_number}" + required = int( + workspace_constraints(workspace_root) + .get("required_reviewers", {}) + .get(args.repo, 0) + ) + matching: list[dict] = [] + for path in 
iter_lane_files(workspace_root): + doc = tomllib.loads(path.read_text()) + if doc.get("lane_type") != "review": + continue + refs = [item["ref"] for item in doc.get("pr_associations", [])] + if ref not in refs: + continue + matching.append( + { + "owner_unit": doc["owner_unit"], + "lane_name": doc["lane_name"], + "repos": doc.get("repos", []), + } + ) + reviewer_units = sorted({row["owner_unit"] for row in matching}) + payload = { + "repo": args.repo, + "pr_number": args.pr_number, + "required_reviewers": required, + "actual_reviewers": len(reviewer_units), + "satisfied": len(reviewer_units) >= required, + "review_lanes": matching, + } + if args.json: + print(json.dumps(payload, indent=2)) + else: + print(json.dumps(payload, indent=2)) + return 0 + + +def share_lane(args: argparse.Namespace) -> int: + workspace_root = args.workspace_root.resolve() + load_lane_doc(workspace_root, args.owner_unit, args.lane_name) + find_unit_spec(workspace_root, args.target_unit) + access_path = shared_lane_access_file(workspace_root, args.owner_unit, args.lane_name) + access_path.parent.mkdir(parents=True, exist_ok=True) + if access_path.exists(): + doc = json.loads(access_path.read_text()) + else: + doc = { + "owner_unit": args.owner_unit, + "lane_name": args.lane_name, + "shared_with": [], + } + if args.target_unit not in doc["shared_with"]: + doc["shared_with"].append(args.target_unit) + access_path.write_text(json.dumps(doc, indent=2) + "\n") + print(access_path) + return 0 + + +def create_continuation_lane(args: argparse.Namespace) -> int: + workspace_root = args.workspace_root.resolve() + source = load_lane_doc(workspace_root, args.source_owner_unit, args.source_lane_name) + unit_spec = find_unit_spec(workspace_root, args.target_unit) + lane_root = lane_dir(workspace_root, args.target_unit, args.target_lane_name) + lane_root.mkdir(parents=True, exist_ok=True) + (lane_root / "repos").mkdir(exist_ok=True) + (lane_root / "context").mkdir(exist_ok=True) + + metadata = 
LaneMetadata( + schema_version=LANE_SCHEMA_VERSION, + lane_name=args.target_lane_name, + owner_unit=args.target_unit, + agent_id=unit_spec.get("agent_id"), + lane_type=source["lane_type"], + repos=source.get("repos", []), + branch_map=source.get("branch_map", {}), + pr_associations=[item["ref"] for item in source.get("pr_associations", [])], + shared_context_roots=source.get("context", {}).get("shared_roots", []), + private_context_roots=[ + f"agents/{args.target_unit}/home/context", + f"agents/{args.target_unit}/lanes/{args.target_lane_name}/context", + ], + exec_defaults=source.get("exec_defaults", {}), + creation_source="lane-handoff", + shared_with=[], + handoff_source={ + "kind": "continuation", + "source_owner_unit": args.source_owner_unit, + "source_lane": args.source_lane_name, + }, + ) + lane_file(workspace_root, args.target_unit, args.target_lane_name).write_text( + metadata.as_toml() + ) + print(lane_file(workspace_root, args.target_unit, args.target_lane_name)) + return 0 + + +def plan_handoff(args: argparse.Namespace) -> int: + workspace_root = args.workspace_root.resolve() + source = load_lane_doc(workspace_root, args.source_owner_unit, args.source_lane_name) + find_unit_spec(workspace_root, args.target_unit) + if args.mode == "shared": + access_path = shared_lane_access_file(workspace_root, args.source_owner_unit, args.source_lane_name) + access = json.loads(access_path.read_text()) if access_path.exists() else None + payload = { + "mode": "shared", + "source_owner_unit": args.source_owner_unit, + "source_lane_name": args.source_lane_name, + "target_unit": args.target_unit, + "shared_access_present": bool(access and args.target_unit in access.get("shared_with", [])), + "exec_rows": [ + { + "acting_unit": args.target_unit, + "owner_unit": args.source_owner_unit, + "lane_name": args.source_lane_name, + "repo": repo, + "cwd": str(workspace_root / "agents" / args.source_owner_unit / "lanes" / args.source_lane_name / "repos" / repo), + "lease_scope": 
f"{args.source_owner_unit}/{args.source_lane_name}", + } + for repo in source.get("repos", []) + ], + "invariant_assessment": { + "unit_scoped": False, + "reason": "target unit must execute inside another unit's lane root and lease scope", + }, + } + else: + target_lane_name = args.target_lane_name or f"{args.source_lane_name}-relay" + payload = { + "mode": "continuation", + "source_owner_unit": args.source_owner_unit, + "source_lane_name": args.source_lane_name, + "target_unit": args.target_unit, + "target_lane_name": target_lane_name, + "exec_rows": [ + { + "acting_unit": args.target_unit, + "owner_unit": args.target_unit, + "lane_name": target_lane_name, + "repo": repo, + "cwd": str(workspace_root / "agents" / args.target_unit / "lanes" / target_lane_name / "repos" / repo), + "lease_scope": f"{args.target_unit}/{target_lane_name}", + } + for repo in source.get("repos", []) + ], + "handoff_source": { + "source_owner_unit": args.source_owner_unit, + "source_lane_name": args.source_lane_name, + }, + "invariant_assessment": { + "unit_scoped": True, + "reason": "target unit gets its own lane root, lease scope, and current-lane state while keeping source linkage", + }, + } + + if args.json: + print(json.dumps(payload, indent=2)) + else: + print(json.dumps(payload, indent=2)) + return 0 + + def create_shared_scratchpad(args: argparse.Namespace) -> int: workspace_root = args.workspace_root.resolve() root = shared_scratchpad_dir(workspace_root, args.name) @@ -1106,6 +1511,29 @@ def next_step(args: argparse.Namespace) -> int: def plan_exec(args: argparse.Namespace) -> int: workspace_root = args.workspace_root.resolve() lane_doc = load_lane_doc(workspace_root, args.owner_unit, args.lane_name) + rebind_doc = load_unit_rebind_doc(workspace_root, args.owner_unit) + if rebind_doc: + affected = { + item["lane_name"]: item for item in rebind_doc.get("affected_lanes", []) + } + if args.lane_name in affected: + payload = { + "status": "blocked", + "reason": "unit-rebound", + 
"lane": lane_doc["lane_name"], + "owner_unit": lane_doc["owner_unit"], + "new_owner_unit": rebind_doc["new_owner_unit"], + "hint": "create a continuation lane under the new unit before resuming work", + } + if args.json: + print(json.dumps(payload, indent=2)) + else: + print("gr2 lane-exec prototype") + print("status=blocked reason=unit-rebound") + print( + f"owner={lane_doc['owner_unit']} rebound_to={rebind_doc['new_owner_unit']} lane={lane_doc['lane_name']}" + ) + return 0 leases = load_lane_leases(workspace_root, args.owner_unit, args.lane_name) active_conflicts, stale_conflicts = conflicting_leases(leases, "agent:exec-planner", "exec") if active_conflicts: @@ -1201,8 +1629,16 @@ def main() -> int: return current_lane(args) if args.command == "lane-history": return lane_history(args) + if args.command == "rebind-unit": + return rebind_unit(args) if args.command == "create-review-lane": return create_review_lane(args) + if args.command == "share-lane": + return share_lane(args) + if args.command == "create-continuation-lane": + return create_continuation_lane(args) + if args.command == "plan-handoff": + return plan_handoff(args) if args.command == "show-lane": return show_lane(args) if args.command == "list-lanes": @@ -1229,6 +1665,8 @@ def main() -> int: return plan_promote_scratchpad(args) if args.command == "recommend-surface": return recommend_surface(args) + if args.command == "check-review-requirements": + return check_review_requirements(args) raise SystemExit(f"unknown command: {args.command}") diff --git a/gr2/prototypes/org_policy_compiler.py b/gr2/prototypes/org_policy_compiler.py new file mode 100644 index 0000000..22edda7 --- /dev/null +++ b/gr2/prototypes/org_policy_compiler.py @@ -0,0 +1,281 @@ +#!/usr/bin/env python3 +"""Prototype premium org/policy -> WorkspaceSpec compilation seam. + +Premium owns: +- org config +- roles and entitlements +- reviewer requirements +- global policy limits + +gr2 consumes only the compiled workspace-scoped result. 
+""" + +from __future__ import annotations + +import argparse +import json +from copy import deepcopy +from typing import Any + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Prototype premium org/policy -> WorkspaceSpec compilation" + ) + sub = parser.add_subparsers(dest="command", required=True) + + demo = sub.add_parser("demo") + demo.add_argument("--json", action="store_true") + + compile_cmd = sub.add_parser("compile") + compile_cmd.add_argument( + "--scenario", + choices=["baseline", "repo-update", "downgrade"], + default="baseline", + ) + compile_cmd.add_argument("--json", action="store_true") + + return parser.parse_args() + + +def premium_org_state() -> dict[str, Any]: + return { + "team_id": "team_synapt_core", + "workspace_id": "ws_synapt_core", + "workspace_name": "synapt-core", + "repos": ["grip", "premium", "recall", "config", "tests"], + "policy": { + "max_concurrent_edit_leases_global": 2, + "lane_naming_convention": "-", + "required_reviewers": { + "premium": 2, + "grip": 1, + "recall": 1, + }, + }, + "roles": { + "builder": { + "repo_access": ["grip", "premium", "recall", "config", "tests"], + "lane_limit": 3, + "allowed_lane_kinds": ["feature", "review", "scratch"], + }, + "qa": { + "repo_access": ["tests", "grip", "recall"], + "lane_limit": 2, + "allowed_lane_kinds": ["review", "scratch"], + }, + "design": { + "repo_access": ["grip", "premium", "config"], + "lane_limit": 3, + "allowed_lane_kinds": ["feature", "review", "scratch"], + }, + }, + "agents": [ + { + "handle": "opus", + "persistent_id": "agent_opus_abc123", + "owner_unit": "release-control", + "role": "builder", + "entitlements": ["premium", "channels", "recall", "multi_lane"], + }, + { + "handle": "sentinel", + "persistent_id": "agent_sentinel_def456", + "owner_unit": "qa-sentinel", + "role": "qa", + "entitlements": ["premium", "channels", "recall"], + }, + { + "handle": "atlas", + "persistent_id": "agent_atlas_ghi789", + "owner_unit": 
"design-research", + "role": "design", + "entitlements": ["premium", "channels", "recall", "multi_lane"], + }, + ], + } + + +def repo_update_state() -> dict[str, Any]: + state = deepcopy(premium_org_state()) + state["repos"].append("mission-control") + state["roles"]["builder"]["repo_access"].append("mission-control") + state["roles"]["design"]["repo_access"].append("mission-control") + state["policy"]["required_reviewers"]["mission-control"] = 2 + return state + + +def downgrade_state() -> dict[str, Any]: + state = deepcopy(premium_org_state()) + for agent in state["agents"]: + if agent["handle"] == "sentinel": + agent["entitlements"] = [] + return state + + +def compile_agent_unit(agent: dict[str, Any], state: dict[str, Any]) -> dict[str, Any]: + role_doc = state["roles"][agent["role"]] + entitlements = set(agent.get("entitlements", [])) + premium_enabled = "premium" in entitlements + + if premium_enabled: + repo_access = role_doc["repo_access"] + lane_limit = role_doc["lane_limit"] + allowed_lane_kinds = role_doc["allowed_lane_kinds"] + channels_enabled = "channels" in entitlements + recall_enabled = "recall" in entitlements + else: + repo_access = ["grip"] + lane_limit = 1 + allowed_lane_kinds = ["feature"] + channels_enabled = False + recall_enabled = False + + return { + "name": agent["owner_unit"], + "path": f"agents/{agent['owner_unit']}", + "agent_id": agent["persistent_id"], + "repos": repo_access, + "constraints": { + "lane_limit": lane_limit, + "allowed_lane_kinds": allowed_lane_kinds, + "channels_enabled": channels_enabled, + "recall_enabled": recall_enabled, + }, + } + + +def compile_workspace_spec(state: dict[str, Any]) -> dict[str, Any]: + return { + "workspace_name": state["workspace_name"], + "workspace_id": state["workspace_id"], + "repos": [{"name": repo, "path": f"repos/{repo}"} for repo in state["repos"]], + "units": [compile_agent_unit(agent, state) for agent in state["agents"]], + "workspace_constraints": { + 
"max_concurrent_edit_leases_global": state["policy"]["max_concurrent_edit_leases_global"], + "lane_naming_convention": state["policy"]["lane_naming_convention"], + "required_reviewers": state["policy"]["required_reviewers"], + }, + } + + +def premium_view(state: dict[str, Any]) -> dict[str, Any]: + return { + "team_id": state["team_id"], + "workspace_id": state["workspace_id"], + "agents": [ + { + "handle": agent["handle"], + "persistent_id": agent["persistent_id"], + "role": agent["role"], + "entitlements": agent["entitlements"], + "owner_unit": agent["owner_unit"], + } + for agent in state["agents"] + ], + "policy": state["policy"], + "notes": [ + "premium owns role evaluation and entitlement interpretation", + "premium decides how org policy degrades when entitlements are removed", + "gr2 should not infer role semantics from raw org config", + ], + } + + +def scenario_bundle() -> dict[str, Any]: + baseline = premium_org_state() + repo_update = repo_update_state() + downgrade = downgrade_state() + return { + "baseline": { + "premium_knows": premium_view(baseline), + "gr2_sees": compile_workspace_spec(baseline), + "summary": "org with 3 agents, 5 repos, max 2 concurrent edit leases globally", + }, + "role_access": { + "builder_repos": compile_agent_unit(baseline["agents"][0], baseline)["repos"], + "qa_repos": compile_agent_unit(baseline["agents"][1], baseline)["repos"], + "summary": "builders get all repos, QA gets test-focused access only", + }, + "repo_update": { + "before": compile_workspace_spec(baseline), + "after": compile_workspace_spec(repo_update), + "summary": "admin adds mission-control mid-sprint; affected units get updated access on recompilation", + }, + "downgrade": { + "before": compile_workspace_spec(baseline), + "after": compile_workspace_spec(downgrade), + "summary": "loss of premium degrades one unit to OSS defaults without injecting org logic into gr2", + }, + } + + +def print_human(payload: dict[str, Any]) -> None: + baseline = 
payload["baseline"] + print("gr2 org/policy compiler prototype") + print() + print(f"workspace: {baseline['gr2_sees']['workspace_name']}") + print( + f"global edit lease cap: {baseline['gr2_sees']['workspace_constraints']['max_concurrent_edit_leases_global']}" + ) + print("units gr2 sees:") + for unit in baseline["gr2_sees"]["units"]: + constraints = unit["constraints"] + print( + f"- {unit['name']} repos={','.join(unit['repos'])} lane_limit={constraints['lane_limit']} " + f"channels={constraints['channels_enabled']} recall={constraints['recall_enabled']}" + ) + print() + print("role-based access") + print(f"- builder repos: {','.join(payload['role_access']['builder_repos'])}") + print(f"- qa repos: {','.join(payload['role_access']['qa_repos'])}") + print() + print("repo update") + before_repos = payload["repo_update"]["before"]["repos"] + after_repos = payload["repo_update"]["after"]["repos"] + print(f"- before repos: {','.join(item['name'] for item in before_repos)}") + print(f"- after repos: {','.join(item['name'] for item in after_repos)}") + print() + print("downgrade") + before_units = { + unit["name"]: unit for unit in payload["downgrade"]["before"]["units"] + } + after_units = { + unit["name"]: unit for unit in payload["downgrade"]["after"]["units"] + } + target = "qa-sentinel" + print( + f"- {target} before: repos={','.join(before_units[target]['repos'])} lane_limit={before_units[target]['constraints']['lane_limit']}" + ) + print( + f"- {target} after: repos={','.join(after_units[target]['repos'])} lane_limit={after_units[target]['constraints']['lane_limit']}" + ) + + +def main() -> int: + args = parse_args() + if args.command == "demo": + payload = scenario_bundle() + if args.json: + print(json.dumps(payload, indent=2)) + else: + print_human(payload) + return 0 + if args.command == "compile": + if args.scenario == "baseline": + state = premium_org_state() + elif args.scenario == "repo-update": + state = repo_update_state() + else: + state = 
downgrade_state() + payload = { + "premium_knows": premium_view(state), + "gr2_sees": compile_workspace_spec(state), + } + print(json.dumps(payload, indent=2)) + return 0 + raise SystemExit(f"unknown command: {args.command}") + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/gr2/prototypes/recall_lane_history.py b/gr2/prototypes/recall_lane_history.py new file mode 100644 index 0000000..c6e1e11 --- /dev/null +++ b/gr2/prototypes/recall_lane_history.py @@ -0,0 +1,284 @@ +#!/usr/bin/env python3 +"""Prototype recall-friendly indexing over gr2 lane event history.""" + +from __future__ import annotations + +import argparse +import json +from collections import defaultdict +from datetime import UTC, datetime, timedelta +from pathlib import Path +from typing import Any + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Prototype recall lane history surface" + ) + sub = parser.add_subparsers(dest="command", required=True) + + demo = sub.add_parser("demo-data") + demo.add_argument("workspace_root", type=Path) + + query = sub.add_parser("query") + query.add_argument("workspace_root", type=Path) + query.add_argument("--lane") + query.add_argument("--actor") + query.add_argument("--repo") + query.add_argument("--start") + query.add_argument("--end") + query.add_argument("--json", action="store_true") + + return parser.parse_args() + + +def events_dir(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "events" + + +def lane_events_file(workspace_root: Path) -> Path: + return events_dir(workspace_root) / "lane_events.jsonl" + + +def append_jsonl(path: Path, payload: dict) -> None: + path.parent.mkdir(parents=True, exist_ok=True) + with path.open("a", encoding="utf-8") as fh: + fh.write(json.dumps(payload) + "\n") + + +def load_jsonl(path: Path) -> list[dict]: + if not path.exists(): + return [] + rows: list[dict] = [] + for line in path.read_text().splitlines(): + line = line.strip() + if not 
line: + continue + rows.append(json.loads(line)) + return rows + + +def parse_ts(raw: str) -> datetime: + return datetime.fromisoformat(raw) + + +def now_utc() -> datetime: + return datetime.now(UTC).replace(microsecond=0) + + +def event_key(event: dict) -> tuple: + return ( + event.get("timestamp", ""), + event.get("lane", ""), + event.get("type", ""), + event.get("agent", ""), + ) + + +def build_index(events: list[dict]) -> dict[str, Any]: + by_lane: dict[str, list[dict]] = defaultdict(list) + by_actor: dict[str, list[dict]] = defaultdict(list) + by_repo: dict[str, list[dict]] = defaultdict(list) + + for event in sorted(events, key=event_key): + lane = event.get("lane") + actor = event.get("agent") + repos = event.get("repos", []) + if lane: + by_lane[lane].append(event) + if actor: + by_actor[actor].append(event) + for repo in repos: + by_repo[repo].append(event) + + return { + "by_lane": dict(by_lane), + "by_actor": dict(by_actor), + "by_repo": dict(by_repo), + "all": sorted(events, key=event_key), + } + + +def lane_history(index: dict[str, Any], lane_name: str) -> dict[str, Any]: + rows = index["by_lane"].get(lane_name, []) + return { + "query": {"lane": lane_name}, + "count": len(rows), + "timeline": rows, + } + + +def actor_history(index: dict[str, Any], actor: str) -> dict[str, Any]: + rows = index["by_actor"].get(actor, []) + touched_lanes = sorted({row.get("lane") for row in rows if row.get("lane")}) + return { + "query": {"actor": actor}, + "count": len(rows), + "lanes": touched_lanes, + "timeline": rows, + } + + +def repo_activity(index: dict[str, Any], repo: str) -> dict[str, Any]: + rows = index["by_repo"].get(repo, []) + actors = sorted({row.get("agent") for row in rows if row.get("agent")}) + return { + "query": {"repo": repo}, + "count": len(rows), + "actors": actors, + "timeline": rows, + } + + +def time_range(index: dict[str, Any], start: str, end: str) -> dict[str, Any]: + start_dt = parse_ts(start) + end_dt = parse_ts(end) + rows = [ + event + 
for event in index["all"] + if start_dt <= parse_ts(event["timestamp"]) <= end_dt + ] + return { + "query": {"start": start, "end": end}, + "count": len(rows), + "timeline": rows, + } + + +def demo_events() -> list[dict]: + base = now_utc() - timedelta(days=7) + + def at(minutes: int) -> str: + return (base + timedelta(minutes=minutes)).isoformat() + + return [ + { + "type": "lane_enter", + "agent": "agent:atlas", + "agent_id": "agent_atlas_ghi789", + "owner_unit": "design-research", + "lane": "auth-refactor", + "lane_type": "feature", + "repos": ["grip", "premium"], + "timestamp": at(0), + }, + { + "type": "lease_acquire", + "agent": "agent:atlas", + "agent_id": "agent_atlas_ghi789", + "owner_unit": "design-research", + "lane": "auth-refactor", + "lane_type": "feature", + "lease_mode": "edit", + "repos": ["grip", "premium"], + "timestamp": at(2), + }, + { + "type": "lane_enter", + "agent": "agent:sentinel", + "agent_id": "agent_sentinel_def456", + "owner_unit": "qa-sentinel", + "lane": "backend-review", + "lane_type": "review", + "repos": ["tests", "grip"], + "timestamp": at(5), + }, + { + "type": "lease_acquire", + "agent": "agent:sentinel", + "agent_id": "agent_sentinel_def456", + "owner_unit": "qa-sentinel", + "lane": "backend-review", + "lane_type": "review", + "lease_mode": "review", + "repos": ["tests", "grip"], + "timestamp": at(7), + }, + { + "type": "lease_release", + "agent": "agent:atlas", + "agent_id": "agent_atlas_ghi789", + "owner_unit": "design-research", + "lane": "auth-refactor", + "lane_type": "feature", + "repos": ["grip", "premium"], + "timestamp": at(45), + }, + { + "type": "lane_exit", + "agent": "agent:atlas", + "agent_id": "agent_atlas_ghi789", + "owner_unit": "design-research", + "lane": "auth-refactor", + "lane_type": "feature", + "repos": ["grip", "premium"], + "timestamp": at(47), + }, + { + "type": "lane_enter", + "agent": "agent:opus", + "agent_id": "agent_opus_abc123", + "owner_unit": "release-control", + "lane": "auth-refactor", + 
"lane_type": "review", + "repos": ["grip", "premium"], + "timestamp": at(60), + }, + { + "type": "lease_acquire", + "agent": "agent:opus", + "agent_id": "agent_opus_abc123", + "owner_unit": "release-control", + "lane": "auth-refactor", + "lane_type": "review", + "lease_mode": "review", + "repos": ["grip", "premium"], + "timestamp": at(62), + }, + ] + + +def write_demo_data(workspace_root: Path) -> dict[str, Any]: + path = lane_events_file(workspace_root) + rows = demo_events() + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text("") + for row in rows: + append_jsonl(path, row) + return {"path": str(path), "count": len(rows)} + + +def render_result(result: dict[str, Any], as_json: bool) -> int: + if as_json: + print(json.dumps(result, indent=2)) + return 0 + print(json.dumps(result, indent=2)) + return 0 + + +def main() -> int: + args = parse_args() + if args.command == "demo-data": + result = write_demo_data(args.workspace_root.resolve()) + print(json.dumps(result, indent=2)) + return 0 + + events = load_jsonl(lane_events_file(args.workspace_root.resolve())) + index = build_index(events) + + if args.command == "query": + if args.lane: + return render_result(lane_history(index, args.lane), args.json) + if args.actor: + return render_result(actor_history(index, args.actor), args.json) + if args.repo: + return render_result(repo_activity(index, args.repo), args.json) + if args.start and args.end: + return render_result(time_range(index, args.start, args.end), args.json) + raise SystemExit("query requires one of --lane, --actor, --repo, or --start/--end") + + raise SystemExit(f"unknown command: {args.command}") + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/gr2/python_cli/__init__.py b/gr2/python_cli/__init__.py new file mode 100644 index 0000000..8167593 --- /dev/null +++ b/gr2/python_cli/__init__.py @@ -0,0 +1,2 @@ +"""Python-first gr2 CLI package.""" + diff --git a/gr2/python_cli/__main__.py b/gr2/python_cli/__main__.py new 
file mode 100644 index 0000000..869a086 --- /dev/null +++ b/gr2/python_cli/__main__.py @@ -0,0 +1,6 @@ +from .app import app + + +if __name__ == "__main__": + app() + diff --git a/gr2/python_cli/app.py b/gr2/python_cli/app.py new file mode 100644 index 0000000..eecc0c1 --- /dev/null +++ b/gr2/python_cli/app.py @@ -0,0 +1,458 @@ +from __future__ import annotations + +import json +from pathlib import Path +from types import SimpleNamespace +from typing import Optional + +import typer + +from .gitops import checkout_branch, clone_repo, ensure_lane_checkout, is_git_repo, remote_origin_url, repo_dirty, stash_if_dirty +from .hooks import HookContext, apply_file_projections, load_repo_hooks, run_lifecycle_stage +from gr2.prototypes import lane_workspace_prototype as lane_proto +from gr2.prototypes import repo_maintenance_prototype as repo_proto + + +app = typer.Typer( + help="Python-first gr2 CLI. This is the production UX proving layer before Rust." +) +repo_app = typer.Typer(help="Repo maintenance and inspection") +lane_app = typer.Typer(help="Lane creation and navigation") +lease_app = typer.Typer(help="Lane lease operations") +review_app = typer.Typer(help="Review and reviewer requirement operations") +workspace_app = typer.Typer(help="Workspace bootstrap and materialization") + +app.add_typer(repo_app, name="repo") +app.add_typer(lane_app, name="lane") +lane_app.add_typer(lease_app, name="lease") +app.add_typer(review_app, name="review") +app.add_typer(workspace_app, name="workspace") + + +def _workspace_repo_spec(workspace_root: Path, repo_name: str) -> dict[str, object]: + spec = lane_proto.load_workspace_spec(workspace_root) + for repo in spec.get("repos", []): + if repo.get("name") == repo_name: + return repo + raise SystemExit(f"repo not found in workspace spec: {repo_name}") + + +def _workspace_spec_path(workspace_root: Path) -> Path: + return workspace_root / ".grip" / "workspace_spec.toml" + + +def _lane_repo_root(workspace_root: Path, owner_unit: str, 
lane_name: str, repo_name: str) -> Path: + return lane_proto.lane_dir(workspace_root, owner_unit, lane_name) / "repos" / repo_name + + +def _materialize_lane_repos(workspace_root: Path, owner_unit: str, lane_name: str) -> None: + lane_doc = lane_proto.load_lane_doc(workspace_root, owner_unit, lane_name) + branch_map = dict(lane_doc.get("branch_map", {})) + lane_root = lane_proto.lane_dir(workspace_root, owner_unit, lane_name) + + for repo_name in lane_doc.get("repos", []): + repo_spec = _workspace_repo_spec(workspace_root, repo_name) + source_repo_root = (workspace_root / str(repo_spec["path"])).resolve() + if not source_repo_root.exists(): + raise SystemExit(f"source repo path does not exist for lane materialization: {source_repo_root}") + target_repo_root = _lane_repo_root(workspace_root, owner_unit, lane_name, repo_name) + first_materialize = ensure_lane_checkout( + source_repo_root=source_repo_root, + target_repo_root=target_repo_root, + branch=branch_map[repo_name], + ) + hooks = load_repo_hooks(target_repo_root) + if not hooks: + continue + ctx = HookContext( + workspace_root=workspace_root, + lane_root=lane_root, + repo_root=target_repo_root, + repo_name=repo_name, + lane_owner=owner_unit, + lane_subject=repo_name, + lane_name=lane_name, + ) + apply_file_projections(hooks, ctx) + run_lifecycle_stage( + hooks, + "on_materialize", + ctx, + repo_dirty=repo_dirty(target_repo_root), + first_materialize=first_materialize, + ) + + +def _run_lane_stage(workspace_root: Path, owner_unit: str, lane_name: str, stage: str) -> None: + lane_doc = lane_proto.load_lane_doc(workspace_root, owner_unit, lane_name) + lane_root = lane_proto.lane_dir(workspace_root, owner_unit, lane_name) + for repo_name in lane_doc.get("repos", []): + repo_root = _lane_repo_root(workspace_root, owner_unit, lane_name, repo_name) + if not repo_root.exists(): + continue + branch = dict(lane_doc.get("branch_map", {})).get(repo_name) + if branch: + checkout_branch(repo_root, branch) + hooks = 
load_repo_hooks(repo_root) + if not hooks: + continue + ctx = HookContext( + workspace_root=workspace_root, + lane_root=lane_root, + repo_root=repo_root, + repo_name=repo_name, + lane_owner=owner_unit, + lane_subject=repo_name, + lane_name=lane_name, + ) + run_lifecycle_stage( + hooks, + stage, + ctx, + repo_dirty=repo_dirty(repo_root), + first_materialize=False, + ) + + +def _materialize_workspace_repo(workspace_root: Path, repo_spec: dict[str, object]) -> None: + repo_name = str(repo_spec["name"]) + repo_root = (workspace_root / str(repo_spec["path"])).resolve() + url = str(repo_spec.get("url", "")).strip() + first_materialize = False + if not repo_root.exists(): + if not url: + raise SystemExit(f"repo missing and no url configured for workspace materialization: {repo_name}") + first_materialize = clone_repo(url, repo_root) + elif not is_git_repo(repo_root): + raise SystemExit(f"workspace repo path exists but is not a git repo: {repo_root}") + + hooks = load_repo_hooks(repo_root) + if not hooks: + return + ctx = HookContext( + workspace_root=workspace_root, + lane_root=repo_root, + repo_root=repo_root, + repo_name=repo_name, + lane_owner="workspace", + lane_subject=repo_name, + lane_name="workspace", + ) + apply_file_projections(hooks, ctx) + run_lifecycle_stage( + hooks, + "on_materialize", + ctx, + repo_dirty=repo_dirty(repo_root), + first_materialize=first_materialize, + ) + + +def _write_workspace_spec(workspace_root: Path, repos: list[dict[str, str]], default_unit: str) -> Path: + spec_path = _workspace_spec_path(workspace_root) + spec_path.parent.mkdir(parents=True, exist_ok=True) + lines = [ + f'workspace_name = "{workspace_root.name}"', + "", + ] + for repo in repos: + lines.extend( + [ + "[[repos]]", + f'name = "{repo["name"]}"', + f'path = "{repo["path"]}"', + f'url = "{repo["url"]}"', + "", + ] + ) + lines.extend( + [ + "[[units]]", + f'name = "{default_unit}"', + f'path = "agents/{default_unit}/home"', + "repos = [" + ", ".join(f'"{repo["name"]}"' for 
repo in repos) + "]", + "", + ] + ) + spec_path.write_text("\n".join(lines)) + return spec_path + + +def _scan_existing_repos(workspace_root: Path) -> list[dict[str, str]]: + repos: list[dict[str, str]] = [] + for child in sorted(workspace_root.iterdir()): + if child.name.startswith("."): + continue + if child.name == "agents": + continue + if not child.is_dir(): + continue + if not is_git_repo(child): + continue + url = remote_origin_url(child) + repos.append( + { + "name": child.name, + "path": child.relative_to(workspace_root).as_posix(), + "url": url or "", + } + ) + return repos + + +def _exit(code: int) -> None: + if code != 0: + raise typer.Exit(code=code) + + +@workspace_app.command("init") +def workspace_init( + workspace_root: Path, + default_unit: str = typer.Option("default", help="Default owner unit for scanned repos"), + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Create a bare workspace_spec.toml by scanning an existing directory of repos.""" + workspace_root = workspace_root.resolve() + repos = _scan_existing_repos(workspace_root) + if not repos: + raise SystemExit(f"no git repos found to initialize workspace spec under: {workspace_root}") + spec_path = _write_workspace_spec(workspace_root, repos, default_unit) + payload = { + "workspace_root": str(workspace_root), + "spec_path": str(spec_path), + "repo_count": len(repos), + "repos": repos, + "default_unit": default_unit, + } + if json_output: + typer.echo(json.dumps(payload, indent=2)) + else: + typer.echo(json.dumps(payload, indent=2)) + + +@workspace_app.command("materialize") +def workspace_materialize( + workspace_root: Path, + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Read workspace_spec.toml, clone any missing repos, and run on_materialize hooks.""" + workspace_root = workspace_root.resolve() + spec = lane_proto.load_workspace_spec(workspace_root) + materialized: 
list[dict[str, object]] = [] + for repo_spec in spec.get("repos", []): + _materialize_workspace_repo(workspace_root, repo_spec) + materialized.append( + { + "name": repo_spec["name"], + "path": str((workspace_root / str(repo_spec["path"])).resolve()), + } + ) + if json_output: + typer.echo(json.dumps({"workspace_root": str(workspace_root), "repos": materialized}, indent=2)) + else: + typer.echo(json.dumps({"workspace_root": str(workspace_root), "repos": materialized}, indent=2)) + + +@repo_app.command("status") +def repo_status( + workspace_root: Path, + spec: Optional[Path] = typer.Option(None, help="Path to workspace_spec.toml"), + policy: Optional[Path] = typer.Option(None, help="Optional repo maintenance policy TOML"), + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Show repo maintenance status without mutating workspace state.""" + workspace_root = workspace_root.resolve() + spec_path = (spec or workspace_root / ".grip" / "workspace_spec.toml").resolve() + spec_doc = repo_proto.read_workspace_spec(spec_path) + policy_doc = repo_proto.read_policy(policy.resolve() if policy else None) + + actions = [] + for target in repo_proto.derive_targets(workspace_root, spec_doc): + status = repo_proto.inspect_repo(target.path) + repo_policy = repo_proto.policy_for(target, policy_doc) + actions.append(repo_proto.classify(target, status, repo_policy)) + + if json_output: + typer.echo(json.dumps([item.as_dict() for item in actions], indent=2)) + else: + typer.echo(repo_proto.render_table(actions)) + + +@repo_app.command("hooks") +def repo_hooks_show( + repo_root: Path, + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Inspect parsed .gr2/hooks.toml for a repo.""" + hooks = load_repo_hooks(repo_root.resolve()) + if hooks is None: + raise typer.Exit(code=1) + if json_output: + typer.echo(json.dumps(hooks.as_dict(), indent=2)) + else: + 
typer.echo(json.dumps(hooks.as_dict(), indent=2)) + + +@lane_app.command("create") +def lane_create( + workspace_root: Path, + owner_unit: str, + lane_name: str, + repos: str = typer.Option(..., help="Comma-separated repo names"), + branch: str = typer.Option(..., help="Default branch or repo=branch mappings"), + lane_type: str = typer.Option("feature", "--type", help="Lane type"), + source: str = typer.Option("manual", help="Creation source label"), + command: list[str] = typer.Option(None, "--command", help="Default command for the lane"), +) -> None: + """Create a lane.""" + workspace_root = workspace_root.resolve() + ns = SimpleNamespace( + workspace_root=workspace_root, + owner_unit=owner_unit, + lane_name=lane_name, + repos=repos, + branch=branch, + type=lane_type, + source=source, + default_commands=command or [], + ) + _exit(lane_proto.create_lane(ns)) + _materialize_lane_repos(workspace_root, owner_unit, lane_name) + + +@lane_app.command("enter") +def lane_enter( + workspace_root: Path, + owner_unit: str, + lane_name: str, + actor: str = typer.Option(..., help="Actor label, e.g. agent:atlas"), + notify_channel: bool = typer.Option(False, "--notify-channel"), + recall: bool = typer.Option(False, "--recall"), +) -> None: + """Enter a lane and optionally emit channel/recall-compatible events.""" + workspace_root = workspace_root.resolve() + _run_lane_stage(workspace_root, owner_unit, lane_name, "on_enter") + ns = SimpleNamespace( + workspace_root=workspace_root, + owner_unit=owner_unit, + lane_name=lane_name, + actor=actor, + notify_channel=notify_channel, + recall=recall, + ) + _exit(lane_proto.enter_lane(ns)) + + +@lane_app.command("exit") +def lane_exit( + workspace_root: Path, + owner_unit: str, + actor: str = typer.Option(..., help="Actor label, e.g. 
human:layne"), + notify_channel: bool = typer.Option(False, "--notify-channel"), + recall: bool = typer.Option(False, "--recall"), +) -> None: + """Exit the current lane for a unit.""" + workspace_root = workspace_root.resolve() + current_doc = lane_proto.load_current_lane_doc(workspace_root, owner_unit) + lane_name = current_doc["current"]["lane_name"] + lane_doc = lane_proto.load_lane_doc(workspace_root, owner_unit, lane_name) + for repo_name in lane_doc.get("repos", []): + repo_root = _lane_repo_root(workspace_root, owner_unit, lane_name, repo_name) + if repo_root.exists(): + stash_if_dirty(repo_root, f"gr2 exit {owner_unit}/{lane_name}") + _run_lane_stage(workspace_root, owner_unit, lane_name, "on_exit") + ns = SimpleNamespace( + workspace_root=workspace_root, + owner_unit=owner_unit, + actor=actor, + notify_channel=notify_channel, + recall=recall, + ) + _exit(lane_proto.exit_lane(ns)) + + +@lane_app.command("current") +def lane_current( + workspace_root: Path, + owner_unit: str, + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Show current lane and recent history for a unit.""" + ns = SimpleNamespace( + workspace_root=workspace_root, + owner_unit=owner_unit, + json=json_output, + ) + _exit(lane_proto.current_lane(ns)) + + +@lease_app.command("acquire") +def lane_lease_acquire( + workspace_root: Path, + owner_unit: str, + lane_name: str, + actor: str = typer.Option(...), + mode: str = typer.Option(..., help="edit | exec | review"), + ttl_seconds: int = typer.Option(900, "--ttl-seconds"), + force: bool = typer.Option(False, "--force"), +) -> None: + """Acquire a lease for a lane.""" + ns = SimpleNamespace( + workspace_root=workspace_root, + owner_unit=owner_unit, + lane_name=lane_name, + actor=actor, + mode=mode, + ttl_seconds=ttl_seconds, + force=force, + ) + _exit(lane_proto.acquire_lane_lease(ns)) + + +@lease_app.command("release") +def lane_lease_release( + workspace_root: Path, + owner_unit: str, + 
lane_name: str, + actor: str = typer.Option(...), +) -> None: + """Release a lease for a lane.""" + ns = SimpleNamespace( + workspace_root=workspace_root, + owner_unit=owner_unit, + lane_name=lane_name, + actor=actor, + ) + _exit(lane_proto.release_lane_lease(ns)) + + +@lease_app.command("show") +def lane_lease_show(workspace_root: Path, owner_unit: str, lane_name: str) -> None: + """Show active leases for a lane.""" + ns = SimpleNamespace( + workspace_root=workspace_root, + owner_unit=owner_unit, + lane_name=lane_name, + ) + _exit(lane_proto.show_lane_leases(ns)) + + +@review_app.command("requirements") +def review_requirements( + workspace_root: Path, + repo: str, + pr_number: int, + json_output: bool = typer.Option(False, "--json", help="Emit machine-readable JSON"), +) -> None: + """Check whether compiled review requirements are satisfied for a repo and PR.""" + ns = SimpleNamespace( + workspace_root=workspace_root, + repo=repo, + pr_number=pr_number, + json=json_output, + ) + _exit(lane_proto.check_review_requirements(ns)) + + +if __name__ == "__main__": + app() diff --git a/gr2/python_cli/gitops.py b/gr2/python_cli/gitops.py new file mode 100644 index 0000000..e9e9057 --- /dev/null +++ b/gr2/python_cli/gitops.py @@ -0,0 +1,90 @@ +from __future__ import annotations + +import subprocess +from pathlib import Path + + +def git(cwd: Path, *args: str) -> subprocess.CompletedProcess[str]: + return subprocess.run( + ["git", *args], + cwd=cwd, + capture_output=True, + text=True, + check=False, + ) + + +def is_git_repo(path: Path) -> bool: + proc = git(path, "rev-parse", "--is-inside-work-tree") + return proc.returncode == 0 and proc.stdout.strip() == "true" + + +def repo_dirty(path: Path) -> bool: + proc = git(path, "status", "--porcelain") + return proc.returncode == 0 and bool(proc.stdout.strip()) + + +def remote_origin_url(path: Path) -> str | None: + proc = git(path, "config", "--get", "remote.origin.url") + if proc.returncode != 0: + return None + value = 
proc.stdout.strip() + return value or None + + +def clone_repo(url: str, target_repo_root: Path) -> bool: + if target_repo_root.exists() and is_git_repo(target_repo_root): + return False + target_repo_root.parent.mkdir(parents=True, exist_ok=True) + proc = subprocess.run( + ["git", "clone", url, str(target_repo_root)], + capture_output=True, + text=True, + check=False, + ) + if proc.returncode != 0: + raise SystemExit(f"failed to clone {url} -> {target_repo_root}:\n{proc.stderr or proc.stdout}") + return True + + +def ensure_lane_checkout( + *, + source_repo_root: Path, + target_repo_root: Path, + branch: str, +) -> bool: + """Ensure a real lane checkout exists. + + Returns True if this was first materialization, False if already present. + """ + if target_repo_root.exists() and is_git_repo(target_repo_root): + return False + + target_repo_root.parent.mkdir(parents=True, exist_ok=True) + + branch_exists = git(source_repo_root, "show-ref", "--verify", f"refs/heads/{branch}").returncode == 0 + if branch_exists: + proc = git(source_repo_root, "worktree", "add", str(target_repo_root), branch) + else: + proc = git(source_repo_root, "worktree", "add", "-b", branch, str(target_repo_root), "HEAD") + + if proc.returncode != 0: + raise SystemExit( + f"failed to create lane checkout for {source_repo_root.name} on {branch}:\n{proc.stderr or proc.stdout}" + ) + return True + + +def checkout_branch(repo_root: Path, branch: str) -> None: + proc = git(repo_root, "checkout", branch) + if proc.returncode != 0: + raise SystemExit(f"failed to checkout {branch} in {repo_root}:\n{proc.stderr or proc.stdout}") + + +def stash_if_dirty(repo_root: Path, message: str) -> bool: + if not repo_dirty(repo_root): + return False + proc = git(repo_root, "stash", "push", "-u", "-m", message) + if proc.returncode != 0: + raise SystemExit(f"failed to stash dirty work in {repo_root}:\n{proc.stderr or proc.stdout}") + return True diff --git a/gr2/python_cli/hooks.py b/gr2/python_cli/hooks.py new file 
mode 100644 index 0000000..809b0e3 --- /dev/null +++ b/gr2/python_cli/hooks.py @@ -0,0 +1,236 @@ +from __future__ import annotations + +import dataclasses +import json +import subprocess +import tomllib +from pathlib import Path + + +VALID_IF_EXISTS = {"skip", "overwrite", "merge", "error"} +VALID_ON_FAILURE = {"block", "warn", "skip"} +VALID_WHEN = {"first_materialize", "always", "dirty", "manual"} + + +@dataclasses.dataclass(frozen=True) +class FileProjection: + kind: str + src: str + dest: str + if_exists: str = "error" + + +@dataclasses.dataclass(frozen=True) +class LifecycleHook: + stage: str + name: str + command: str + cwd: str + when: str + on_failure: str + + +@dataclasses.dataclass(frozen=True) +class RepoHooks: + repo_name: str | None + file_links: list[FileProjection] + file_copies: list[FileProjection] + on_materialize: list[LifecycleHook] + on_enter: list[LifecycleHook] + on_exit: list[LifecycleHook] + policy: dict[str, object] + path: Path + + def as_dict(self) -> dict[str, object]: + return { + "repo_name": self.repo_name, + "path": str(self.path), + "files": { + "link": [dataclasses.asdict(item) for item in self.file_links], + "copy": [dataclasses.asdict(item) for item in self.file_copies], + }, + "lifecycle": { + "on_materialize": [dataclasses.asdict(item) for item in self.on_materialize], + "on_enter": [dataclasses.asdict(item) for item in self.on_enter], + "on_exit": [dataclasses.asdict(item) for item in self.on_exit], + }, + "policy": self.policy, + } + + +@dataclasses.dataclass(frozen=True) +class HookContext: + workspace_root: Path + lane_root: Path + repo_root: Path + repo_name: str + lane_owner: str + lane_subject: str + lane_name: str + + +def hook_file(repo_root: Path) -> Path: + return repo_root / ".gr2" / "hooks.toml" + + +def load_repo_hooks(repo_root: Path) -> RepoHooks | None: + path = hook_file(repo_root) + if not path.exists(): + return None + with path.open("rb") as fh: + raw = tomllib.load(fh) + return RepoHooks( + 
repo_name=raw.get("repo", {}).get("name"), + file_links=_parse_projections(raw, "link"), + file_copies=_parse_projections(raw, "copy"), + on_materialize=_parse_lifecycle(raw, "on_materialize", default_on_failure="block"), + on_enter=_parse_lifecycle(raw, "on_enter", default_on_failure="warn"), + on_exit=_parse_lifecycle(raw, "on_exit", default_on_failure="warn"), + policy=dict(raw.get("policy", {})), + path=path, + ) + + +def _parse_projections(raw: dict, kind: str) -> list[FileProjection]: + items = raw.get("files", {}).get(kind, []) + results: list[FileProjection] = [] + for item in items: + if_exists = str(item.get("if_exists", "error")) + if if_exists not in VALID_IF_EXISTS: + raise SystemExit(f"invalid if_exists={if_exists} in {kind} projection") + results.append( + FileProjection( + kind=kind, + src=str(item["src"]), + dest=str(item["dest"]), + if_exists=if_exists, + ) + ) + return results + + +def _parse_lifecycle(raw: dict, stage: str, default_on_failure: str) -> list[LifecycleHook]: + items = raw.get("lifecycle", {}).get(stage, []) + results: list[LifecycleHook] = [] + for item in items: + when = str(item.get("when", "always")) + on_failure = str(item.get("on_failure", default_on_failure)) + if when not in VALID_WHEN: + raise SystemExit(f"invalid when={when} in lifecycle.{stage}") + if on_failure not in VALID_ON_FAILURE: + raise SystemExit(f"invalid on_failure={on_failure} in lifecycle.{stage}") + results.append( + LifecycleHook( + stage=stage, + name=str(item["name"]), + command=str(item["command"]), + cwd=str(item.get("cwd", "{repo_root}")), + when=when, + on_failure=on_failure, + ) + ) + return results + + +def render_path(template: str, ctx: HookContext) -> Path: + rendered = render_text(template, ctx) + return Path(rendered) + + +def render_text(template: str, ctx: HookContext) -> str: + return ( + template.replace("{workspace_root}", str(ctx.workspace_root)) + .replace("{lane_root}", str(ctx.lane_root)) + .replace("{repo_root}", str(ctx.repo_root)) + 
.replace("{repo_name}", ctx.repo_name) + .replace("{lane_owner}", ctx.lane_owner) + .replace("{lane_subject}", ctx.lane_subject) + .replace("{lane_name}", ctx.lane_name) + ) + + +def apply_file_projections(hooks: RepoHooks, ctx: HookContext) -> None: + for item in [*hooks.file_links, *hooks.file_copies]: + rendered_src = render_text(item.src, ctx) + src = Path(rendered_src) + if not src.is_absolute(): + src = ctx.repo_root / src + dest = render_path(item.dest, ctx) + dest.parent.mkdir(parents=True, exist_ok=True) + + if not src.exists(): + raise SystemExit(f"projection source does not exist: {src} from {hooks.path}") + + if dest.exists() or dest.is_symlink(): + if item.if_exists == "skip": + continue + if item.if_exists == "error": + raise SystemExit(f"projection conflict at {dest} from {hooks.path}") + if item.if_exists == "merge": + raise SystemExit(f"merge projections not implemented yet for {dest}") + if item.if_exists == "overwrite": + if dest.is_dir() and not dest.is_symlink(): + raise SystemExit(f"refusing to overwrite directory projection target: {dest}") + dest.unlink(missing_ok=True) + + if item.kind == "link": + dest.symlink_to(src) + else: + dest.write_bytes(src.read_bytes()) + + +def run_lifecycle_stage( + hooks: RepoHooks, + stage: str, + ctx: HookContext, + *, + repo_dirty: bool, + first_materialize: bool, +) -> None: + hooks_for_stage = { + "on_materialize": hooks.on_materialize, + "on_enter": hooks.on_enter, + "on_exit": hooks.on_exit, + }[stage] + for hook in hooks_for_stage: + if not _should_run(hook.when, repo_dirty=repo_dirty, first_materialize=first_materialize): + continue + cwd = render_path(hook.cwd, ctx) + command = render_text(hook.command, ctx) + proc = subprocess.run( + command, + cwd=cwd, + shell=True, + capture_output=True, + text=True, + ) + if proc.returncode == 0: + continue + message = json.dumps( + { + "stage": stage, + "hook": hook.name, + "cwd": str(cwd), + "command": command, + "returncode": proc.returncode, + "stdout": 
proc.stdout, + "stderr": proc.stderr, + }, + indent=2, + ) + if hook.on_failure == "block": + raise SystemExit(message) + if hook.on_failure == "warn": + print(message) + + +def _should_run(when: str, *, repo_dirty: bool, first_materialize: bool) -> bool: + if when == "always": + return True + if when == "first_materialize": + return first_materialize + if when == "dirty": + return repo_dirty + if when == "manual": + return False + return False