diff --git a/F1-json-flag.md b/F1-json-flag.md deleted file mode 100644 index 1fe3c18..0000000 --- a/F1-json-flag.md +++ /dev/null @@ -1,78 +0,0 @@ -# F1: --format json → --json (completed) - -Implemented finding F1 from `AGENT_NATIVE_AUDIT.md`. vers-cli now standardizes on `--json` across the entire command surface, with `--format` retained as a hidden, deprecated alias. - -## Changed files - -``` - README.md | 12 +++--- - cmd/branch.go | 12 ++++-- - cmd/commit.go | 36 ++++++++++++---- - cmd/deploy.go | 12 ++++-- - cmd/env.go | 10 +++-- - cmd/info.go | 12 ++++-- - cmd/pause.go | 12 ++++-- - cmd/repo.go | 48 ++++++++++++++++----- - cmd/resize.go | 12 ++++-- - cmd/resume.go | 12 ++++-- - cmd/run.go | 12 ++++-- - cmd/run_commit.go | 12 ++++-- - cmd/status.go | 12 ++++-- - cmd/tag.go | 24 +++++++---- - internal/presenters/format.go | 24 +++++++++-- - internal/presenters/format_test.go | 37 +++++++++++++---- - 16 files changed, 224 insertions(+), 75 deletions(-) -``` - -19 cobra `--format` registrations now have a sibling `--json` BoolVar; 19 `pres.ParseFormat` call sites now pass the JSON flag and propagate the validation error. - -Commit: `6744100 feat: standardize on --json flag (deprecate --format json)` on branch `pi-parallel-d95f3d3f-0`. - -## Design choices - -1. **Centralized validation in `presenters.ParseFormat`.** New signature: `ParseFormat(quiet, jsonFlag bool, formatStr string) (OutputFormat, error)`. Precedence is `quiet > json > format > default`. Invalid `--format` values return an enumerated error rather than silently falling through to default-format (which was the previous behavior — a P3 violation). -2. **Cobra's `MarkDeprecated` handles the warning.** It prints `Flag --format has been deprecated, use --json instead` to stderr automatically when the flag is used, and hides the flag from `--help`. No custom warning code needed. -3. 
**Stdout/stderr separation preserved.** Deprecation warnings go to stderr; JSON data continues to land cleanly on stdout. `vers status --format json | jq` still works without contamination. - -## Validation - -- `go build ./...` — clean -- `go vet ./...` — clean -- `go test ./...` — all packages pass (including expanded `format_test.go` with 9 cases covering the new precedence and error semantics) - -Smoke tests against built binary: - -``` -$ vers status --json -[] # exit 0, clean stdout - -$ vers status --format json 2>/tmp/err -[] # exit 0, JSON on stdout -$ cat /tmp/err -Flag --format has been deprecated, use --json instead - -$ vers status --format yaml -Flag --format has been deprecated, use --json instead -Error: --format must be "json" (got: "yaml"). Note: --format is deprecated, use --json instead - # exit 1 -``` - -`vers status --help` no longer lists `--format`; long descriptions everywhere now read "Use --json for machine-readable output." - -## Risks / open questions - -- **Usage banner printed on RunE error.** When `--format yaml` is rejected, cobra prints the command's usage block after the error. This is pre-existing behavior — `SilenceUsage = true` is only set in the auth path in `cmd/root.go:210`. Out of scope for F1, but worth a follow-up if cleaner agent-facing errors are wanted (set `SilenceUsage = true` on the root command). -- **`alias` command is still missing JSON output entirely.** This is finding F2, separate task. -- **No tests assert deprecation warning emission to stderr.** The cobra `MarkDeprecated` behavior is library-provided and stable, so I didn't add an integration test capturing stderr; the unit tests exercise only the validation path. If desired, a small `cmd/`-level test could capture stderr from a `--format json` invocation. - -## Recommended next step - -Hand off to F2 (info → get) and F3 (pagination) as planned. 
After both land, audit the few commands that have a `--quiet` flag but no `--json` (`alias`, possibly `head`, `checkout`) and unify those — the `ParseFormat` machinery is already in place to extend them cheaply. - ---- - -Implemented F1 (canonical `--json` flag). -Changed files: 16 (13 cmd files, presenters package, README). -Validation: `go build`, `go vet`, `go test ./...` all clean; manual smoke tests confirm `--json` works, legacy `--format json` works with stderr deprecation, invalid `--format` values error with the enumerated message. -Open risks/questions: usage banner on error is pre-existing; no integration test for deprecation warning. -Recommended next step: proceed with F2 and F3 as planned. diff --git a/F7-pagination.md b/F7-pagination.md deleted file mode 100644 index 35381d4..0000000 --- a/F7-pagination.md +++ /dev/null @@ -1,100 +0,0 @@ -# F7: Pagination + Truncation Hints - -Branch: `pi-parallel-d95f3d3f-2` -Commit: `bc31992 feat: paginate list commands with --limit and truncation hints` - -## Implemented - -Added `--limit N` (default 50; `0` = unbounded) and `--offset N` to every list-style command, plus a uniform truncation hint surface: - -| Command | File | Notes | -|---|---|---| -| `vers status` | `cmd/status.go` | List mode only. Single-VM mode unchanged. | -| `vers commit list` | `cmd/commit.go` | | -| `vers repo list` | `cmd/repo.go` | | -| `vers repo tag list` | `cmd/repo.go` | Included for surface consistency (was implicit in scope). | -| `vers tag list` | `cmd/tag.go` | | -| `vers env list` | `cmd/env.go` | Sorted by key for stable pagination. JSON wraps as `[{key,value}]` envelope when truncated to preserve order; bare object map when not truncated. | -| `vers alias` (no arg) | `cmd/alias.go` | Sorted by name. 
| - -## Output shapes - -**JSON, not truncated** — bare items array (backwards-compat, identical to pre-change shape): -```json -[ {...}, {...} ] -``` - -**JSON, truncated** — envelope with hint and next_offset: -```json -{ - "items": [ {...} ], - "total": 11, - "limit": 3, - "offset": 0, - "truncated": true, - "next_offset": 3, - "hint": "showing 3 of 11 — use --limit=N (0 for all) or --offset=3 for the next page" -} -``` - -**Text / quiet modes** — table or IDs on stdout, single-line hint on stderr: -``` -$ vers status -q --limit 1 -6781f925-09ba-48ab-8f2c-9adeb75eec6d -[stderr] (showing 1 of 2 — use --limit=N (0 for all) or --offset=1 for the next page) -``` - -## Implementation notes - -- New file `internal/presenters/pagination.go` exposes: - - `PageInfo` struct (Total/Limit/Offset/Truncated/NextOffset/Hint) - - `ApplyPaging(total, limit, offset) (start, end, info)` — clamps and computes - - `PrintListJSON(items, info)` — bare array vs. envelope based on `info.Truncated` - - `PrintTruncationHint(w, info)` — no-op when not truncated; otherwise `(hint)\n` -- Unit tested in `internal/presenters/pagination_test.go` (10 sub-cases covering empty/full/truncated/unbounded/offset-past-end/negative offsets). - -## Server-side pagination caveat - -The Go SDK (`vers-sdk-go@v0.1.0-alpha.32`) **does not expose** `Limit`/`Offset`/`Cursor` query parameters on its list endpoints today. The `ListCommitsResponse` shape *includes* `Limit`/`Offset`/`Total` fields, suggesting the API server may accept them, but the SDK has no typed param surface for them. - -Per the task instructions, I implemented client-side pagination after the full response. Marked clearly: - -- One canonical TODO at the top of `internal/presenters/pagination.go` describing the constraint. -- Inline TODO comments at every call site (`cmd/status.go`, `cmd/commit.go`, `cmd/repo.go`, `cmd/tag.go`, `cmd/env.go`) noting where to plumb `--limit`/`--offset` to the SDK once supported. 
- -When the SDK exposes the params, the change is local: replace the `ApplyPaging` block with a request that passes `limit`/`offset` through (e.g., via `option.WithQuery`), receives an already-paged response, and uses the API's `total` field instead of `len(items)` to detect truncation. - -## Validation - -- `go build ./...` — passes -- `go vet ./...` — clean -- `go test ./...` — all packages pass, including new `TestApplyPaging` (10 sub-cases) -- Live API checks against `/Users/tynandaly/basin/vers-cli`'s authenticated environment: - - `vers status --limit 1 --format json` → returns 1 of 2 VMs with truncation envelope. ✓ - - `vers status --limit 1` text mode → table to stdout, hint to stderr. ✓ - - `vers status -q --limit 1` quiet mode → ID to stdout, hint to stderr. ✓ - - `vers status --limit 0` unbounded → bare array, no envelope. ✓ - - `vers commit list --limit 3 --format json` → envelope with `total: 11`, `next_offset: 3`. ✓ - - `vers commit list --limit 3 --offset 3 --format json` → next page works. ✓ - - `vers commit list --help` → new flags documented. ✓ - -## Out of scope (left untouched) - -- `--format json` → `--json` rename (separate worktree handles F1). -- `info` → `get` rename (separate worktree handles F8). -- Existing `--format string` flags retained as-is on every list command. -- Tests in `internal/handlers/` and `internal/mcp/` were not modified — handler signatures unchanged because pagination lives in the cmd layer. - -## Open questions / risks - -1. **JSON shape change for `env list` when truncated.** Historical shape was `{"KEY": "VALUE", ...}` (an object). The truncated envelope uses `items: [{key, value}, ...]` because Go map iteration order is unstable, and a sorted ordered list is required for stable `next_offset` paging. When *not* truncated, the historical map shape is preserved. Agents that hit the truncated branch will see a different shape — flagged here in case downstream MCP tooling depends on the map form. - -2. 
**`commit list` text-mode header.** `RenderCommitsList` prints `"%d commit(s)"` from `view.Total`, which is the API-reported total, not the paged count. Left as-is because the API total is more useful information; the paged count is implicit in the row count plus the stderr hint. If desired, this could be tweaked to `"showing N of TOTAL commits"`. - -3. **`repo list` / `tag list` / `repo tag list` have no API-reported total.** The truncation total is `len(response)` — i.e., what we got back from the API. If the server is silently capping the response, the agent would see a wrong total. Low risk today (these endpoints appear to return everything), but worth verifying once server-side pagination lands. - -## Recommended next steps - -1. **Coordinate the merge order with the F1 (`--format` → `--json`) and F8 (`info` → `get`) worktrees.** This branch only touches `--limit`/`--offset` and the `info` lines in renderers/help; conflicts should be limited to flag-init blocks and Long-help text, all easy three-way merges. -2. **Open an issue in `vers-sdk-go`** requesting typed `Limit`/`Offset` parameters on every list endpoint so the client-side trim can be removed and `total`/`next_offset` come from the API. -3. **Add an MCP-tool wrapper note** so the existing MCP server surfaces `--limit`/`--offset` in tool descriptions for agent consumers (out of scope for F7 itself; deserves a dedicated follow-up). diff --git a/README.md b/README.md index dfc339d..11c59e7 100644 --- a/README.md +++ b/README.md @@ -132,6 +132,29 @@ vers tag delete vers tag delete ``` +### Feedback + +Report friction (or anything else) about the CLI. Entries are appended to a +local JSONL journal at `~/.vers/feedback.jsonl`. 
+ +```bash +# Record locally +vers feedback "the --tier flag rejects 'enterprise' but docs list it as valid" + +# List recent entries +vers feedback list +vers feedback list --limit 5 --json + +# Opt-in upstream delivery: when VERS_FEEDBACK_ENDPOINT is set, the entry is +# also POSTed there (application/json, 5s timeout). Failures are logged to +# stderr but the local journal entry is still written. +VERS_FEEDBACK_ENDPOINT=https://example.com/cli-feedback \ + vers feedback "race condition in --wait when job completes during first poll" +``` + +Override the journal path with `VERS_FEEDBACK_PATH` (primarily for testing). + + ### Shell Composition Commands with `-q` output are designed to compose with standard Unix tools: @@ -159,6 +182,24 @@ vers get --json | jq '.ip' vers ps -q ``` +### Job Ledger + +Every `--wait` invocation of `run`, `branch`, `deploy`, `resume`, or `run-commit` +appends an entry to a durable JSONL ledger at `~/.vers/jobs.jsonl` (override +with `VERS_JOBS_DIR`). Use `vers jobs` to introspect: + +```bash +vers jobs list --json # all jobs as JSON +vers jobs list --status failed # only failed jobs +vers jobs get job_ # full record for one job +vers jobs prune --older-than 7d # trim entries older than 7 days +vers jobs prune --all --dry-run # preview clearing the ledger +``` + +Phase 1 ships journaling only. The ledger is written best-effort: a write +failure never causes the underlying command to fail. Resumption of in-flight +jobs is not yet implemented. + ## Configuration diff --git a/cmd/agent_context.go b/cmd/agent_context.go new file mode 100644 index 0000000..c3ccbcd --- /dev/null +++ b/cmd/agent_context.go @@ -0,0 +1,210 @@ +package cmd + +import ( + "encoding/json" + "fmt" + "os" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +// agentContextSchemaVersion is bumped whenever the agent-context JSON shape +// changes in a backwards-incompatible way. 
+const agentContextSchemaVersion = "1" + +// agentContextEnumAnnotationPrefix is the cobra Annotations key prefix used to +// declare a known enum set for a flag. Example: +// +// cmd.Annotations["vers:enum:visibility"] = "public,private,unlisted" +const agentContextEnumAnnotationPrefix = "vers:enum:" + +var agentContextPretty bool + +var agentContextCmd = &cobra.Command{ + Use: "agent-context", + Short: "Emit a versioned JSON description of the CLI for agent consumption", + Long: `Emit a versioned, machine-readable JSON description of every command, +subcommand, and flag exposed by the CLI. + +Agents should consume this output instead of parsing --help text. The top-level +"schema_version" field lets consumers detect breaking shape changes.`, + SilenceUsage: true, + SilenceErrors: true, + Run: func(cmd *cobra.Command, args []string) { + doc := buildAgentContext(cmd.Root()) + + var ( + out []byte + err error + ) + if agentContextPretty { + out, err = json.MarshalIndent(doc, "", " ") + } else { + out, err = json.Marshal(doc) + } + if err != nil { + // Per spec this command must never fail; fall back to a minimal + // stub document and exit 0. + fmt.Fprintln(os.Stdout, `{"schema_version":"`+agentContextSchemaVersion+`","commands":{}}`) + return + } + fmt.Fprintln(os.Stdout, string(out)) + }, +} + +func init() { + agentContextCmd.Flags().BoolVar(&agentContextPretty, "pretty", false, "Emit indented JSON instead of compact JSON") + rootCmd.AddCommand(agentContextCmd) +} + +// agentContextDoc is the top-level JSON shape emitted by `vers agent-context`. 
+type agentContextDoc struct { + SchemaVersion string `json:"schema_version"` + CLI agentContextCLI `json:"cli"` + Commands map[string]*agentContextCommand `json:"commands"` + AvailableProfiles []string `json:"available_profiles"` + Feedback agentContextFeedback `json:"feedback"` +} + +type agentContextCLI struct { + Name string `json:"name"` + Version string `json:"version"` + Description string `json:"description"` +} + +type agentContextCommand struct { + Use string `json:"use"` + Short string `json:"short"` + Long string `json:"long,omitempty"` + Aliases []string `json:"aliases,omitempty"` + Args agentContextArgs `json:"args"` + Async bool `json:"async"` + Flags map[string]*agentContextFlag `json:"flags"` + Subcommands map[string]*agentContextCommand `json:"subcommands,omitempty"` +} + +type agentContextArgs struct { + Min int `json:"min"` + Max int `json:"max"` +} + +type agentContextFlag struct { + Shorthand string `json:"shorthand,omitempty"` + Type string `json:"type"` + Default string `json:"default"` + Usage string `json:"usage"` + Required bool `json:"required"` + Enum []string `json:"enum,omitempty"` +} + +type agentContextFeedback struct { + LocalPath string `json:"local_path"` + EndpointConfigured bool `json:"endpoint_configured"` +} + +func buildAgentContext(root *cobra.Command) *agentContextDoc { + doc := &agentContextDoc{ + SchemaVersion: agentContextSchemaVersion, + CLI: agentContextCLI{ + Name: root.Name(), + Version: Version, + Description: strings.TrimSpace(root.Long), + }, + Commands: map[string]*agentContextCommand{}, + AvailableProfiles: []string{}, + Feedback: agentContextFeedback{ + LocalPath: "~/.vers/feedback.jsonl", + EndpointConfigured: os.Getenv("VERS_FEEDBACK_ENDPOINT") != "", + }, + } + + for _, c := range root.Commands() { + if shouldSkipCommand(c) { + continue + } + doc.Commands[c.Name()] = describeCommand(c) + } + return doc +} + +func shouldSkipCommand(c *cobra.Command) bool { + if c.Hidden { + return true + } + switch c.Name() 
{ + case "help", "completion": + return true + } + return false +} + +func describeCommand(c *cobra.Command) *agentContextCommand { + out := &agentContextCommand{ + Use: c.Use, + Short: c.Short, + Long: strings.TrimSpace(c.Long), + Args: agentContextArgs{Min: 0, Max: -1}, + Flags: map[string]*agentContextFlag{}, + } + if len(c.Aliases) > 0 { + out.Aliases = append(out.Aliases, c.Aliases...) + } + + // Collect flags (local + inherited from parents), excluding hidden & + // deprecated. Inherited flags give agents a complete picture without + // having to re-walk the parent chain themselves. + c.Flags().VisitAll(func(f *pflag.Flag) { + if entry := describeFlag(c, f); entry != nil { + out.Flags["--"+f.Name] = entry + } + }) + + // async = visible --wait flag exists. + if w := c.Flags().Lookup("wait"); w != nil && !w.Hidden && len(w.Deprecated) == 0 { + out.Async = true + } + + for _, sub := range c.Commands() { + if shouldSkipCommand(sub) { + continue + } + if out.Subcommands == nil { + out.Subcommands = map[string]*agentContextCommand{} + } + out.Subcommands[sub.Name()] = describeCommand(sub) + } + + return out +} + +func describeFlag(parent *cobra.Command, f *pflag.Flag) *agentContextFlag { + if f.Hidden || len(f.Deprecated) > 0 { + return nil + } + entry := &agentContextFlag{ + Shorthand: f.Shorthand, + Type: f.Value.Type(), + Default: f.DefValue, + Usage: f.Usage, + } + if _, ok := f.Annotations[cobra.BashCompOneRequiredFlag]; ok { + entry.Required = true + } + if parent != nil && parent.Annotations != nil { + if raw, ok := parent.Annotations[agentContextEnumAnnotationPrefix+f.Name]; ok && raw != "" { + parts := strings.Split(raw, ",") + vals := make([]string, 0, len(parts)) + for _, p := range parts { + if v := strings.TrimSpace(p); v != "" { + vals = append(vals, v) + } + } + if len(vals) > 0 { + entry.Enum = vals + } + } + } + return entry +} diff --git a/cmd/agent_context_test.go b/cmd/agent_context_test.go new file mode 100644 index 0000000..f74cbb9 --- 
/dev/null +++ b/cmd/agent_context_test.go @@ -0,0 +1,99 @@ +package cmd + +import ( + "encoding/json" + "testing" +) + +func TestAgentContextSchemaVersion(t *testing.T) { + doc := buildAgentContext(rootCmd) + if doc.SchemaVersion != "1" { + t.Fatalf("schema_version = %q, want %q", doc.SchemaVersion, "1") + } + if doc.CLI.Name != "vers" { + t.Errorf("cli.name = %q, want %q", doc.CLI.Name, "vers") + } + if doc.AvailableProfiles == nil { + t.Errorf("available_profiles must be non-nil (empty slice)") + } + if doc.Feedback.LocalPath == "" { + t.Errorf("feedback.local_path should be populated") + } +} + +func TestAgentContextWalksTopLevelCommands(t *testing.T) { + doc := buildAgentContext(rootCmd) + + // Every visible top-level command (minus help/completion) must be present. + for _, c := range rootCmd.Commands() { + if shouldSkipCommand(c) { + continue + } + if _, ok := doc.Commands[c.Name()]; !ok { + t.Errorf("commands[%q] missing from agent-context output", c.Name()) + } + } + + // help and completion must be skipped. + if _, ok := doc.Commands["help"]; ok { + t.Errorf("commands[help] should be skipped") + } + if _, ok := doc.Commands["completion"]; ok { + t.Errorf("commands[completion] should be skipped") + } +} + +func TestAgentContextEmitsWellFormedJSON(t *testing.T) { + doc := buildAgentContext(rootCmd) + raw, err := json.Marshal(doc) + if err != nil { + t.Fatalf("json.Marshal failed: %v", err) + } + // Round-trip through generic decoder to confirm shape. 
+ var back map[string]any + if err := json.Unmarshal(raw, &back); err != nil { + t.Fatalf("json.Unmarshal round-trip failed: %v", err) + } + for _, key := range []string{"schema_version", "cli", "commands", "available_profiles", "feedback"} { + if _, ok := back[key]; !ok { + t.Errorf("emitted JSON missing top-level key %q", key) + } + } +} + +func TestAgentContextRunIsAsync(t *testing.T) { + doc := buildAgentContext(rootCmd) + run, ok := doc.Commands["run"] + if !ok { + t.Fatalf("commands[run] missing") + } + if !run.Async { + t.Errorf("run.async = false, want true (run exposes --wait)") + } +} + +func TestAgentContextEnumAnnotationHook(t *testing.T) { + // Find any subcommand to attach a synthetic annotation to. We use a + // fresh cobra-style fake by piggybacking on agent-context itself: it + // has a --pretty bool flag we can decorate. The hook is purely a + // description-time mechanism, so this exercises the path without + // mutating production commands. + prev := agentContextCmd.Annotations + t.Cleanup(func() { agentContextCmd.Annotations = prev }) + agentContextCmd.Annotations = map[string]string{ + agentContextEnumAnnotationPrefix + "pretty": "true,false", + } + + doc := buildAgentContext(rootCmd) + ac, ok := doc.Commands["agent-context"] + if !ok { + t.Fatalf("commands[agent-context] missing") + } + flag, ok := ac.Flags["--pretty"] + if !ok { + t.Fatalf("flags[--pretty] missing") + } + if len(flag.Enum) != 2 || flag.Enum[0] != "true" || flag.Enum[1] != "false" { + t.Errorf("flag.Enum = %v, want [true false]", flag.Enum) + } +} diff --git a/cmd/branch.go b/cmd/branch.go index 8c2a443..19e348a 100644 --- a/cmd/branch.go +++ b/cmd/branch.go @@ -2,8 +2,11 @@ package cmd import ( "context" + "os" + "strings" "github.com/hdresearch/vers-cli/internal/handlers" + "github.com/hdresearch/vers-cli/internal/jobs" pres "github.com/hdresearch/vers-cli/internal/presenters" "github.com/spf13/cobra" ) @@ -35,6 +38,14 @@ Use --wait to block until new VMs are running.`, if 
len(args) > 0 { target = args[0] } + var jobID string + if branchWait { + jobID, _ = jobs.Submit(jobs.Submission{ + Kind: "vm.branch", + Command: "vers branch --wait", + Args: os.Args[1:], + }) + } res, err := handlers.HandleBranch(apiCtx, application, handlers.BranchReq{ Target: target, Alias: alias, @@ -43,8 +54,18 @@ Use --wait to block until new VMs are running.`, Wait: branchWait, }) if err != nil { + if branchWait { + _ = jobs.Fail(jobID, err) + } return err } + if branchWait { + result := res.NewID + if result == "" && len(res.NewIDs) > 0 { + result = strings.Join(res.NewIDs, ",") + } + _ = jobs.Complete(jobID, result) + } format, err := pres.ParseFormat(false, branchJSON, branchFormat) if err != nil { diff --git a/cmd/deploy.go b/cmd/deploy.go index 419198d..9869df8 100644 --- a/cmd/deploy.go +++ b/cmd/deploy.go @@ -2,8 +2,10 @@ package cmd import ( "context" + "os" "github.com/hdresearch/vers-cli/internal/handlers" + "github.com/hdresearch/vers-cli/internal/jobs" pres "github.com/hdresearch/vers-cli/internal/presenters" "github.com/spf13/cobra" ) @@ -56,10 +58,24 @@ Examples: Wait: deployWait, } + var jobID string + if deployWait { + jobID, _ = jobs.Submit(jobs.Submission{ + Kind: "vm.deploy", + Command: "vers deploy --wait", + Args: os.Args[1:], + }) + } view, err := handlers.HandleDeploy(apiCtx, application, req) if err != nil { + if deployWait { + _ = jobs.Fail(jobID, err) + } return err } + if deployWait { + _ = jobs.Complete(jobID, view.VmID) + } format, err := pres.ParseFormat(false, deployJSON, deployFormat) if err != nil { diff --git a/cmd/feedback.go b/cmd/feedback.go new file mode 100644 index 0000000..66b5d1c --- /dev/null +++ b/cmd/feedback.go @@ -0,0 +1,217 @@ +package cmd + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/hdresearch/vers-cli/internal/feedback" + pres "github.com/hdresearch/vers-cli/internal/presenters" + "github.com/spf13/cobra" +) + +// Flags for `vers feedback `. 
+var ( + feedbackJSON bool +) + +// Flags for `vers feedback list`. +var ( + feedbackListJSON bool + feedbackListLimit int + feedbackListOffset int +) + +// feedbackResult is the JSON envelope emitted by `vers feedback --json`. +type feedbackResult struct { + ID string `json:"id"` + Timestamp string `json:"timestamp"` + VersVersion string `json:"vers_version"` + Message string `json:"message"` + SentUpstream bool `json:"sent_upstream"` + UpstreamStatus int `json:"upstream_status,omitempty"` + UpstreamError string `json:"upstream_error,omitempty"` + JournalPath string `json:"journal_path"` +} + +var feedbackCmd = &cobra.Command{ + Use: "feedback ", + Short: "Report friction or feedback about the vers CLI", + Long: `Record a feedback entry locally (and optionally POST upstream). + +Each entry is appended to a JSONL journal at ~/.vers/feedback.jsonl. When the +VERS_FEEDBACK_ENDPOINT environment variable is set, the entry is also POSTed +to that URL as application/json with a 5s timeout. Upstream delivery is +best-effort: a failed POST does not fail the command — the local journal +entry is still written. + +Use 'vers feedback list' to inspect recent entries. + +Use --json for machine-readable output. + +Examples: + vers feedback "the --tier flag rejects 'enterprise' but docs list it as valid" + VERS_FEEDBACK_ENDPOINT=https://example.com/cli-feedback vers feedback "race in --wait" + vers feedback list --limit 5 --json`, + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + message := strings.TrimSpace(strings.Join(args, " ")) + if message == "" { + return fmt.Errorf("feedback message must not be empty. 
Usage: vers feedback ") + } + + format, err := pres.ParseFormat(false, feedbackJSON, "") + if err != nil { + return err + } + + path, err := feedback.DefaultPath() + if err != nil { + return err + } + + id, err := feedback.NewID() + if err != nil { + return err + } + + entry := feedback.Entry{ + ID: id, + Timestamp: feedback.Now(), + VersVersion: Version, + Message: message, + SentUpstream: false, + } + + // Try upstream POST first (if configured) so we can record the result + // in the journal. A failure here is non-fatal. + endpoint := strings.TrimSpace(os.Getenv(feedback.EndpointEnvVar)) + var upstreamStatus int + var upstreamErr error + if endpoint != "" { + upstreamStatus, upstreamErr = feedback.PostUpstream(context.Background(), endpoint, entry) + if upstreamErr == nil { + entry.SentUpstream = true + } + } + + // Always write the local journal entry, even if the upstream POST failed. + if err := feedback.Append(path, entry); err != nil { + return fmt.Errorf("write feedback journal: %w", err) + } + + result := feedbackResult{ + ID: entry.ID, + Timestamp: entry.Timestamp, + VersVersion: entry.VersVersion, + Message: entry.Message, + SentUpstream: entry.SentUpstream, + UpstreamStatus: upstreamStatus, + JournalPath: path, + } + if upstreamErr != nil { + result.UpstreamError = upstreamErr.Error() + } + + // Emit the upstream-failed warning to stderr in both text and JSON modes + // so agents see it without polluting machine-readable stdout. 
+ if endpoint != "" && upstreamErr != nil { + fmt.Fprintf(cmd.ErrOrStderr(), + "feedback recorded locally; upstream POST failed: %s\n", upstreamErr.Error()) + } + + switch format { + case pres.FormatJSON: + return pres.PrintJSON(result) + default: + switch { + case endpoint == "": + fmt.Fprintln(cmd.OutOrStdout(), "feedback recorded locally (1 entry)") + case upstreamErr == nil: + fmt.Fprintf(cmd.OutOrStdout(), + "feedback recorded locally and sent upstream (status: %d)\n", upstreamStatus) + default: + // Warning already went to stderr; confirm the local write on stdout. + fmt.Fprintln(cmd.OutOrStdout(), "feedback recorded locally (1 entry)") + } + } + return nil + }, +} + +var feedbackListCmd = &cobra.Command{ + Use: "list", + Short: "List recent feedback entries from the local journal", + Long: `Read recent entries from the local feedback journal (~/.vers/feedback.jsonl). + +Pagination: + --limit N Cap results at N (default 10). Use 0 for unbounded. + --offset N Skip the first N results (use with --limit to page). + +Entries are returned newest-first. When the result is truncated, a hint with +--offset for the next page is printed to stderr (text mode) or included in +the JSON envelope. + +Use --json for machine-readable output.`, + Args: cobra.NoArgs, + RunE: func(cmd *cobra.Command, args []string) error { + format, err := pres.ParseFormat(false, feedbackListJSON, "") + if err != nil { + return err + } + + path, err := feedback.DefaultPath() + if err != nil { + return err + } + + entries, err := feedback.ReadAll(path) + if err != nil { + return err + } + + // Newest-first ordering. + reversed := make([]feedback.Entry, len(entries)) + for i, e := range entries { + reversed[len(entries)-1-i] = e + } + + start, end, info := pres.ApplyPaging(len(reversed), feedbackListLimit, feedbackListOffset) + paged := reversed[start:end] + + switch format { + case pres.FormatJSON: + // Always emit a non-nil items slice so JSON consumers see [] not null. 
+ if paged == nil { + paged = []feedback.Entry{} + } + return pres.PrintListJSON(paged, info) + default: + if len(paged) == 0 { + fmt.Fprintln(cmd.OutOrStdout(), "No feedback entries.") + return nil + } + for _, e := range paged { + upstream := "local" + if e.SentUpstream { + upstream = "local+upstream" + } + fmt.Fprintf(cmd.OutOrStdout(), "%s %s [%s] %s\n", + e.Timestamp, e.ID, upstream, e.Message) + } + pres.PrintTruncationHint(cmd.ErrOrStderr(), info) + } + return nil + }, +} + +func init() { + rootCmd.AddCommand(feedbackCmd) + feedbackCmd.Flags().BoolVar(&feedbackJSON, "json", false, "Output as JSON") + + feedbackCmd.AddCommand(feedbackListCmd) + feedbackListCmd.Flags().BoolVar(&feedbackListJSON, "json", false, "Output as JSON") + feedbackListCmd.Flags().IntVar(&feedbackListLimit, "limit", 10, "Maximum number of entries to return (0 = unbounded)") + feedbackListCmd.Flags().IntVar(&feedbackListOffset, "offset", 0, "Number of entries to skip (for paging)") +} diff --git a/cmd/jobs.go b/cmd/jobs.go new file mode 100644 index 0000000..eadcb33 --- /dev/null +++ b/cmd/jobs.go @@ -0,0 +1,205 @@ +package cmd + +import ( + "fmt" + "text/tabwriter" + "time" + + "github.com/hdresearch/vers-cli/internal/jobs" + pres "github.com/hdresearch/vers-cli/internal/presenters" + "github.com/spf13/cobra" +) + +var jobsCmd = &cobra.Command{ + Use: "jobs", + Short: "Manage the durable job ledger", + Long: `View and manage the local job ledger. + +Every '--wait' invocation of an async-submitting command (run, branch, deploy, +resume, run-commit) appends entries to a JSONL ledger at ~/.vers/jobs.jsonl +(override with VERS_JOBS_DIR). This command reads, summarises, and prunes that +ledger. + +Note: Phase 1 ships journaling only — entries are written for audit and +introspection. 
Resumption of in-flight jobs is not yet implemented.`, + Run: func(cmd *cobra.Command, args []string) { + _ = cmd.Help() + }, +} + +// --- jobs list --- + +var ( + jobsListJSON bool + jobsListLimit int + jobsListOffset int + jobsListStatus string +) + +var jobsListCmd = &cobra.Command{ + Use: "list", + Short: "List recent jobs", + Long: `List the latest state of every job in the ledger, most recent first. + +Use --status to filter to one of: submitted, complete, failed. +Use --limit and --offset to page (default --limit 50, 0 = unbounded).`, + RunE: func(cmd *cobra.Command, args []string) error { + if jobsListStatus != "" { + switch jobsListStatus { + case jobs.StatusSubmitted, jobs.StatusComplete, jobs.StatusFailed: + default: + return fmt.Errorf("--status must be one of: submitted, complete, failed (got: %q)", jobsListStatus) + } + } + entries, err := jobs.Latest(jobsListStatus) + if err != nil { + return err + } + + start, end, info := pres.ApplyPaging(len(entries), jobsListLimit, jobsListOffset) + paged := entries[start:end] + + if jobsListJSON { + items := make([]jobs.Entry, len(paged)) + copy(items, paged) + return pres.PrintListJSON(items, info) + } + + if len(paged) == 0 { + fmt.Fprintln(cmd.OutOrStdout(), "No jobs recorded.") + pres.PrintTruncationHint(cmd.ErrOrStderr(), info) + return nil + } + w := tabwriter.NewWriter(cmd.OutOrStdout(), 0, 0, 2, ' ', 0) + fmt.Fprintln(w, "JOB_ID\tSTATUS\tKIND\tSTARTED\tDURATION") + for _, e := range paged { + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", + e.ID, + e.Status, + e.Kind, + e.StartedAt.Local().Format(time.RFC3339), + formatDuration(e), + ) + } + w.Flush() + pres.PrintTruncationHint(cmd.ErrOrStderr(), info) + return nil + }, +} + +func formatDuration(e jobs.Entry) string { + if e.DurationMs != nil { + d := time.Duration(*e.DurationMs) * time.Millisecond + return d.Truncate(time.Millisecond).String() + } + if e.Status == jobs.StatusSubmitted { + return "—" + } + return "" +} + +// --- jobs get --- + +var jobsGetJSON 
bool + +var jobsGetCmd = &cobra.Command{ + Use: "get <job-id>", + Short: "Show one job entry", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + entry, ok, err := jobs.Get(args[0]) + if err != nil { + return err + } + if !ok { + cmd.SilenceUsage = true + // errorsx maps "not found" -> ExitNotFound (3). + return fmt.Errorf("job not found: %s", args[0]) + } + if jobsGetJSON { + return pres.PrintJSON(entry) + } + w := tabwriter.NewWriter(cmd.OutOrStdout(), 0, 0, 2, ' ', 0) + fmt.Fprintf(w, "ID:\t%s\n", entry.ID) + fmt.Fprintf(w, "Kind:\t%s\n", entry.Kind) + fmt.Fprintf(w, "Command:\t%s\n", entry.Command) + if len(entry.Args) > 0 { + fmt.Fprintf(w, "Args:\t%v\n", entry.Args) + } + fmt.Fprintf(w, "Status:\t%s\n", entry.Status) + fmt.Fprintf(w, "Started:\t%s\n", entry.StartedAt.Local().Format(time.RFC3339)) + if entry.CompletedAt != nil { + fmt.Fprintf(w, "Completed:\t%s\n", entry.CompletedAt.Local().Format(time.RFC3339)) + } + if entry.DurationMs != nil { + fmt.Fprintf(w, "Duration:\t%s\n", time.Duration(*entry.DurationMs)*time.Millisecond) + } + if entry.ResultID != "" { + fmt.Fprintf(w, "Result:\t%s\n", entry.ResultID) + } + if entry.Error != "" { + fmt.Fprintf(w, "Error:\t%s\n", entry.Error) + } + return w.Flush() + }, +} + +// --- jobs prune --- + +var ( + jobsPruneOlderThan string + jobsPruneAll bool + jobsPruneDryRun bool +) + +var jobsPruneCmd = &cobra.Command{ + Use: "prune", + Short: "Remove old job entries from the ledger", + Long: `Remove ledger entries older than --older-than (default 7d), or all entries with --all. + +Use --dry-run to preview without modifying the ledger. 
+ +Examples: + vers jobs prune --older-than 7d + vers jobs prune --older-than 24h --dry-run + vers jobs prune --all`, + RunE: func(cmd *cobra.Command, args []string) error { + opts := jobs.PruneOptions{ + All: jobsPruneAll, + DryRun: jobsPruneDryRun, + } + if !jobsPruneAll { + d, err := jobs.ParseDuration(jobsPruneOlderThan) + if err != nil { + return err + } + opts.OlderThan = d + } + res, err := jobs.Prune(opts) + if err != nil { + return err + } + prefix := "pruned" + if jobsPruneDryRun { + prefix = "would prune" + } + fmt.Fprintf(cmd.OutOrStdout(), "%s %d entries (%d kept)\n", prefix, res.Pruned, res.Kept) + return nil + }, +} + +func init() { + rootCmd.AddCommand(jobsCmd) + jobsCmd.AddCommand(jobsListCmd, jobsGetCmd, jobsPruneCmd) + + jobsListCmd.Flags().BoolVar(&jobsListJSON, "json", false, "Output as JSON") + jobsListCmd.Flags().IntVar(&jobsListLimit, "limit", 50, "Maximum number of jobs to return (0 = unbounded)") + jobsListCmd.Flags().IntVar(&jobsListOffset, "offset", 0, "Number of jobs to skip (for paging)") + jobsListCmd.Flags().StringVar(&jobsListStatus, "status", "", "Filter by status: submitted, complete, failed") + + jobsGetCmd.Flags().BoolVar(&jobsGetJSON, "json", false, "Output as JSON") + + jobsPruneCmd.Flags().StringVar(&jobsPruneOlderThan, "older-than", "7d", "Prune entries older than this duration (e.g. 
7d, 24h, 30m)") + jobsPruneCmd.Flags().BoolVar(&jobsPruneAll, "all", false, "Prune all entries regardless of age") + jobsPruneCmd.Flags().BoolVar(&jobsPruneDryRun, "dry-run", false, "Preview what would be pruned without writing") +} diff --git a/cmd/resume.go b/cmd/resume.go index 1e58bec..f3e6668 100644 --- a/cmd/resume.go +++ b/cmd/resume.go @@ -2,8 +2,10 @@ package cmd import ( "context" + "os" "github.com/hdresearch/vers-cli/internal/handlers" + "github.com/hdresearch/vers-cli/internal/jobs" pres "github.com/hdresearch/vers-cli/internal/presenters" "github.com/spf13/cobra" ) @@ -29,13 +31,27 @@ Use --wait to block until the VM is running.`, if len(args) > 0 { target = args[0] } + var jobID string + if resumeWait { + jobID, _ = jobs.Submit(jobs.Submission{ + Kind: "vm.resume", + Command: "vers resume --wait", + Args: os.Args[1:], + }) + } view, err := handlers.HandleResume(apiCtx, application, handlers.ResumeReq{ Target: target, Wait: resumeWait, }) if err != nil { + if resumeWait { + _ = jobs.Fail(jobID, err) + } return err } + if resumeWait { + _ = jobs.Complete(jobID, view.VMName) + } format, err := pres.ParseFormat(false, resumeJSON, resumeFormat) if err != nil { diff --git a/cmd/run.go b/cmd/run.go index 89a708f..4c5cfe1 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -2,8 +2,10 @@ package cmd import ( "context" + "os" "github.com/hdresearch/vers-cli/internal/handlers" + "github.com/hdresearch/vers-cli/internal/jobs" pres "github.com/hdresearch/vers-cli/internal/presenters" "github.com/hdresearch/vers-cli/internal/runconfig" "github.com/spf13/cobra" @@ -41,10 +43,24 @@ Use --wait to block until the VM is running.`, VMAlias: vmAlias, Wait: runWait, } + var jobID string + if runWait { + jobID, _ = jobs.Submit(jobs.Submission{ + Kind: "vm.run", + Command: "vers run --wait", + Args: os.Args[1:], + }) + } view, err := handlers.HandleRun(apiCtx, application, req) if err != nil { + if runWait { + _ = jobs.Fail(jobID, err) + } return err } + if runWait { + _ = 
jobs.Complete(jobID, view.RootVmID) + } format, err := pres.ParseFormat(false, runJSON, runFormat) if err != nil { diff --git a/cmd/run_commit.go b/cmd/run_commit.go index 23664de..1aef3f7 100644 --- a/cmd/run_commit.go +++ b/cmd/run_commit.go @@ -2,8 +2,10 @@ package cmd import ( "context" + "os" "github.com/hdresearch/vers-cli/internal/handlers" + "github.com/hdresearch/vers-cli/internal/jobs" pres "github.com/hdresearch/vers-cli/internal/presenters" "github.com/hdresearch/vers-cli/internal/runconfig" "github.com/spf13/cobra" @@ -35,10 +37,24 @@ Use --wait to block until the VM is running.`, apiCtx, cancel := context.WithTimeout(context.Background(), application.Timeouts.APILong) defer cancel() req := handlers.RunCommitReq{CommitKey: commitKey, VMAlias: commitVmAlias, Wait: runCommitWait} + var jobID string + if runCommitWait { + jobID, _ = jobs.Submit(jobs.Submission{ + Kind: "vm.run_commit", + Command: "vers run-commit --wait", + Args: os.Args[1:], + }) + } view, err := handlers.HandleRunCommit(apiCtx, application, req) if err != nil { + if runCommitWait { + _ = jobs.Fail(jobID, err) + } return err } + if runCommitWait { + _ = jobs.Complete(jobID, view.RootVmID) + } format, err := pres.ParseFormat(false, runCommitJSON, runCommitFormat) if err != nil { diff --git a/internal/feedback/store.go b/internal/feedback/store.go new file mode 100644 index 0000000..3498698 --- /dev/null +++ b/internal/feedback/store.go @@ -0,0 +1,179 @@ +// Package feedback implements local journaling and opt-in upstream POST for +// the `vers feedback` command (F18 Phase A). +// +// Storage is an append-only JSONL file at ~/.vers/feedback.jsonl by default. +// Upstream delivery is opt-in via the VERS_FEEDBACK_ENDPOINT env var; when +// set, each new entry is POSTed best-effort with a 5s timeout. 
+package feedback + +import ( + "bufio" + "bytes" + "context" + "crypto/rand" + "encoding/base32" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "time" +) + +// EndpointEnvVar is the env var used to enable upstream POST. +const EndpointEnvVar = "VERS_FEEDBACK_ENDPOINT" + +// PathEnvVar overrides the default journal path. Primarily for tests. +const PathEnvVar = "VERS_FEEDBACK_PATH" + +// Entry is one record in the local JSONL journal. +type Entry struct { + ID string `json:"id"` + Timestamp string `json:"timestamp"` + VersVersion string `json:"vers_version"` + Message string `json:"message"` + SentUpstream bool `json:"sent_upstream"` +} + +// upstreamPayload is the shape POSTed to VERS_FEEDBACK_ENDPOINT. The +// sent_upstream flag is intentionally journal-only. +type upstreamPayload struct { + ID string `json:"id"` + Timestamp string `json:"timestamp"` + VersVersion string `json:"vers_version"` + Message string `json:"message"` +} + +// DefaultPath returns the default journal path (~/.vers/feedback.jsonl), +// honoring VERS_FEEDBACK_PATH when set. +func DefaultPath() (string, error) { + if p := os.Getenv(PathEnvVar); p != "" { + return p, nil + } + home, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("resolve home dir: %w", err) + } + return filepath.Join(home, ".vers", "feedback.jsonl"), nil +} + +// NewID returns a short random base32 identifier (10 chars, lowercase, no +// padding). Used as the entry id. +func NewID() (string, error) { + var buf [6]byte + if _, err := rand.Read(buf[:]); err != nil { + return "", fmt.Errorf("read random: %w", err) + } + enc := base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(buf[:]) + // 6 random bytes -> 10 base32 chars. + return strings.ToLower(enc), nil +} + +// Append writes an entry as a single JSON line to path, creating parent +// directories as needed. 
+func Append(path string, e Entry) error { + if path == "" { + return fmt.Errorf("feedback: empty journal path") + } + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { + return fmt.Errorf("create journal dir: %w", err) + } + f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) + if err != nil { + return fmt.Errorf("open journal: %w", err) + } + defer f.Close() + b, err := json.Marshal(e) + if err != nil { + return fmt.Errorf("marshal entry: %w", err) + } + b = append(b, '\n') + if _, err := f.Write(b); err != nil { + return fmt.Errorf("write journal: %w", err) + } + return nil +} + +// ReadAll loads every entry from the journal, oldest first. A missing file +// returns an empty slice and no error. +func ReadAll(path string) ([]Entry, error) { + if path == "" { + return nil, fmt.Errorf("feedback: empty journal path") + } + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, fmt.Errorf("open journal: %w", err) + } + defer f.Close() + + var out []Entry + scanner := bufio.NewScanner(f) + // Allow generously-sized lines (long messages). + scanner.Buffer(make([]byte, 64*1024), 1024*1024) + lineNum := 0 + for scanner.Scan() { + lineNum++ + line := bytes.TrimSpace(scanner.Bytes()) + if len(line) == 0 { + continue + } + var e Entry + if err := json.Unmarshal(line, &e); err != nil { + return nil, fmt.Errorf("parse journal line %d: %w", lineNum, err) + } + out = append(out, e) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("scan journal: %w", err) + } + return out, nil +} + +// PostUpstream POSTs the entry payload to endpoint as application/json with a +// 5s timeout. It returns the HTTP status code on success. On any failure +// (transport, non-2xx) it returns a non-nil error along with the status code +// (0 if the request never completed). 
+func PostUpstream(ctx context.Context, endpoint string, e Entry) (int, error) { + payload := upstreamPayload{ + ID: e.ID, + Timestamp: e.Timestamp, + VersVersion: e.VersVersion, + Message: e.Message, + } + body, err := json.Marshal(payload) + if err != nil { + return 0, fmt.Errorf("marshal payload: %w", err) + } + + reqCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + req, err := http.NewRequestWithContext(reqCtx, http.MethodPost, endpoint, bytes.NewReader(body)) + if err != nil { + return 0, fmt.Errorf("build request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{Timeout: 5 * time.Second} + resp, err := client.Do(req) + if err != nil { + return 0, err + } + defer resp.Body.Close() + // Drain so connections can be reused. + _, _ = io.Copy(io.Discard, resp.Body) + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return resp.StatusCode, fmt.Errorf("upstream returned status %d", resp.StatusCode) + } + return resp.StatusCode, nil +} + +// Now returns the current time as an RFC3339 string in UTC. 
+func Now() string { + return time.Now().UTC().Format(time.RFC3339) +} diff --git a/internal/feedback/store_test.go b/internal/feedback/store_test.go new file mode 100644 index 0000000..e5f1591 --- /dev/null +++ b/internal/feedback/store_test.go @@ -0,0 +1,169 @@ +package feedback + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "testing" +) + +func TestNewID(t *testing.T) { + id1, err := NewID() + if err != nil { + t.Fatalf("NewID: %v", err) + } + if got := len(id1); got < 8 || got > 12 { + t.Errorf("expected id length in 8..12, got %d (%q)", got, id1) + } + if id1 != strings.ToLower(id1) { + t.Errorf("expected lowercase id, got %q", id1) + } + id2, err := NewID() + if err != nil { + t.Fatalf("NewID: %v", err) + } + if id1 == id2 { + t.Errorf("expected unique ids, got duplicate %q", id1) + } +} + +func TestAppendAndReadAll(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "nested", "feedback.jsonl") + + entries := []Entry{ + {ID: "a1", Timestamp: "2026-05-07T00:00:00Z", VersVersion: "v1", Message: "hi", SentUpstream: false}, + {ID: "b2", Timestamp: "2026-05-07T00:00:01Z", VersVersion: "v1", Message: "two\nlines", SentUpstream: true}, + } + for _, e := range entries { + if err := Append(path, e); err != nil { + t.Fatalf("Append: %v", err) + } + } + + got, err := ReadAll(path) + if err != nil { + t.Fatalf("ReadAll: %v", err) + } + if len(got) != len(entries) { + t.Fatalf("expected %d entries, got %d", len(entries), len(got)) + } + for i, e := range entries { + if got[i] != e { + t.Errorf("entry %d: want %+v, got %+v", i, e, got[i]) + } + } + + // Each line in the file must be a single valid JSON object. 
+ raw, err := os.ReadFile(path) + if err != nil { + t.Fatalf("read raw: %v", err) + } + for i, line := range strings.Split(strings.TrimRight(string(raw), "\n"), "\n") { + var e Entry + if err := json.Unmarshal([]byte(line), &e); err != nil { + t.Errorf("line %d not valid json: %v", i, err) + } + } +} + +func TestReadAllMissingFile(t *testing.T) { + dir := t.TempDir() + got, err := ReadAll(filepath.Join(dir, "nope.jsonl")) + if err != nil { + t.Fatalf("ReadAll on missing file: %v", err) + } + if len(got) != 0 { + t.Errorf("expected empty slice, got %d entries", len(got)) + } +} + +func TestDefaultPathHonorsEnv(t *testing.T) { + t.Setenv(PathEnvVar, "/tmp/custom-feedback.jsonl") + p, err := DefaultPath() + if err != nil { + t.Fatalf("DefaultPath: %v", err) + } + if p != "/tmp/custom-feedback.jsonl" { + t.Errorf("expected env override, got %q", p) + } +} + +func TestDefaultPathFallsBackToHome(t *testing.T) { + t.Setenv(PathEnvVar, "") + p, err := DefaultPath() + if err != nil { + t.Fatalf("DefaultPath: %v", err) + } + if !strings.HasSuffix(p, filepath.Join(".vers", "feedback.jsonl")) { + t.Errorf("expected path to end with .vers/feedback.jsonl, got %q", p) + } +} + +func TestPostUpstreamSuccess(t *testing.T) { + var gotBody []byte + var gotCT string + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gotCT = r.Header.Get("Content-Type") + gotBody, _ = io.ReadAll(r.Body) + w.WriteHeader(http.StatusOK) + })) + defer srv.Close() + + e := Entry{ID: "x", Timestamp: "t", VersVersion: "v", Message: "m", SentUpstream: false} + status, err := PostUpstream(context.Background(), srv.URL, e) + if err != nil { + t.Fatalf("PostUpstream: %v", err) + } + if status != 200 { + t.Errorf("expected 200, got %d", status) + } + if gotCT != "application/json" { + t.Errorf("expected application/json content-type, got %q", gotCT) + } + + // Body must not include sent_upstream. 
+ var payload map[string]interface{} + if err := json.Unmarshal(gotBody, &payload); err != nil { + t.Fatalf("unmarshal posted body: %v", err) + } + if _, ok := payload["sent_upstream"]; ok { + t.Errorf("sent_upstream must not be POSTed; payload=%v", payload) + } + for _, k := range []string{"id", "timestamp", "vers_version", "message"} { + if _, ok := payload[k]; !ok { + t.Errorf("missing %q in payload: %v", k, payload) + } + } +} + +func TestPostUpstreamNon2xx(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusInternalServerError) + })) + defer srv.Close() + + status, err := PostUpstream(context.Background(), srv.URL, Entry{ID: "x"}) + if err == nil { + t.Errorf("expected error on 500, got nil") + } + if status != 500 { + t.Errorf("expected status 500, got %d", status) + } +} + +func TestPostUpstreamTransportError(t *testing.T) { + // Non-routable address should fail fast. + status, err := PostUpstream(context.Background(), "http://127.0.0.1:1/", Entry{ID: "x"}) + if err == nil { + t.Errorf("expected transport error, got nil") + } + if status != 0 { + t.Errorf("expected status 0 on transport error, got %d", status) + } +} diff --git a/internal/jobs/ledger.go b/internal/jobs/ledger.go new file mode 100644 index 0000000..b0f30af --- /dev/null +++ b/internal/jobs/ledger.go @@ -0,0 +1,436 @@ +// Package jobs implements a durable, append-only JSONL ledger for tracking +// `--wait` invocations across vers-cli runs (F14 Phase 1: journaling only). +// +// Phase 1 ships journaling: every `--wait` command appends a "submitted" entry +// when it dispatches the API call, and a "complete" or "failed" entry when the +// handler returns. Resumption of in-flight jobs is intentionally out of scope +// (Phase 2). +// +// Writes are best-effort: a write or directory failure must NOT cause the +// invoking command to fail. 
Callers should ignore the error returned from +// Submit/Complete/Fail in normal flow (or log it to stderr at most). +package jobs + +import ( + "crypto/rand" + "encoding/hex" + "encoding/json" + "fmt" + "os" + "path/filepath" + "sort" + "strings" + "time" +) + +// Status values for ledger entries. +const ( + StatusSubmitted = "submitted" + StatusComplete = "complete" + StatusFailed = "failed" +) + +// Entry is a single ledger record. Each line in the JSONL file is one Entry. +// Entries with the same ID represent state transitions of the same job; the +// latest line wins when collapsing for reads. +type Entry struct { + ID string `json:"id"` + Kind string `json:"kind"` + Command string `json:"command"` + Args []string `json:"args"` + StartedAt time.Time `json:"started_at"` + CompletedAt *time.Time `json:"completed_at,omitempty"` + DurationMs *int64 `json:"duration_ms,omitempty"` + Status string `json:"status"` + ResultID string `json:"result_id,omitempty"` + Error string `json:"error,omitempty"` +} + +// Submission is the input for Submit. Args should not include the program +// name; Command is a short, human-readable invocation summary like +// "vers run --wait". +type Submission struct { + Kind string + Command string + Args []string +} + +// dirOverride, when non-empty, takes precedence over the VERS_JOBS_DIR +// environment variable and the default (~/.vers). Tests use SetDir. +var dirOverride string + +// SetDir overrides the directory the ledger is stored in for the current +// process. Pass "" to clear the override (env var / default re-applies). +// Intended for tests. +func SetDir(dir string) { dirOverride = dir } + +// Dir returns the resolved ledger directory. +// +// Precedence: explicit override (SetDir) > VERS_JOBS_DIR env > $HOME/.vers. 
+func Dir() (string, error) { + if dirOverride != "" { + return dirOverride, nil + } + if d := os.Getenv("VERS_JOBS_DIR"); d != "" { + return d, nil + } + home, err := os.UserHomeDir() + if err != nil { + return "", err + } + return filepath.Join(home, ".vers"), nil +} + +// Path returns the full path to the JSONL ledger file. +func Path() (string, error) { + d, err := Dir() + if err != nil { + return "", err + } + return filepath.Join(d, "jobs.jsonl"), nil +} + +// Submit appends a new "submitted" entry and returns the assigned job id. +// On any I/O failure it returns an empty string and a non-nil error; the +// caller should treat that as best-effort and continue regardless. +func Submit(s Submission) (string, error) { + id, err := newJobID() + if err != nil { + return "", err + } + e := Entry{ + ID: id, + Kind: s.Kind, + Command: s.Command, + Args: append([]string{}, s.Args...), + StartedAt: time.Now().UTC(), + Status: StatusSubmitted, + } + if err := Append(e); err != nil { + return id, err + } + return id, nil +} + +// Complete appends a terminal "complete" entry referencing an existing id. +// If id is empty (e.g. Submit failed), Complete is a no-op. +func Complete(id, resultID string) error { + if id == "" { + return nil + } + prev, ok, err := latestByID(id) + if err != nil { + return err + } + now := time.Now().UTC() + e := Entry{ + ID: id, + Status: StatusComplete, + CompletedAt: &now, + ResultID: resultID, + } + if ok { + copyMeta(&e, prev) + dur := now.Sub(prev.StartedAt).Milliseconds() + e.DurationMs = &dur + } + return Append(e) +} + +// Fail appends a terminal "failed" entry. If id is empty, Fail is a no-op. +// A nil err is recorded as an empty error string but still marked failed. 
+func Fail(id string, err error) error { + if id == "" { + return nil + } + prev, ok, lerr := latestByID(id) + if lerr != nil { + return lerr + } + now := time.Now().UTC() + msg := "" + if err != nil { + msg = err.Error() + } + e := Entry{ + ID: id, + Status: StatusFailed, + CompletedAt: &now, + Error: msg, + } + if ok { + copyMeta(&e, prev) + dur := now.Sub(prev.StartedAt).Milliseconds() + e.DurationMs = &dur + } + return Append(e) +} + +// copyMeta carries forward identity-stable fields from an earlier entry into +// a terminal entry so the latest line is self-contained. +func copyMeta(dst *Entry, src Entry) { + dst.Kind = src.Kind + dst.Command = src.Command + dst.Args = append([]string{}, src.Args...) + dst.StartedAt = src.StartedAt +} + +// Append writes a single entry as one JSON line to the ledger file. The +// directory is created if missing. +func Append(e Entry) error { + dir, err := Dir() + if err != nil { + return err + } + if err := os.MkdirAll(dir, 0o755); err != nil { + return err + } + path := filepath.Join(dir, "jobs.jsonl") + f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) + if err != nil { + return err + } + defer f.Close() + b, err := json.Marshal(e) + if err != nil { + return err + } + b = append(b, '\n') + _, err = f.Write(b) + return err +} + +// readAll reads every entry in file order (append order). Missing file +// returns (nil, nil). +func readAll() ([]Entry, error) { + path, err := Path() + if err != nil { + return nil, err + } + data, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + return nil, err + } + var out []Entry + for _, line := range strings.Split(string(data), "\n") { + line = strings.TrimSpace(line) + if line == "" { + continue + } + var e Entry + if err := json.Unmarshal([]byte(line), &e); err != nil { + // Skip malformed lines rather than failing the whole read. 
+ continue + } + out = append(out, e) + } + return out, nil +} + +// Latest returns the latest state per job id, ordered by StartedAt descending +// (most recent first). Optional statusFilter, when non-empty, restricts to +// entries with matching status. +func Latest(statusFilter string) ([]Entry, error) { + all, err := readAll() + if err != nil { + return nil, err + } + by := make(map[string]Entry, len(all)) + for _, e := range all { + prev, ok := by[e.ID] + if !ok { + by[e.ID] = e + continue + } + // Later line wins. Prefer terminal status; otherwise keep the + // most-recent line by file order (which equals append order). + if isTerminal(e.Status) || !isTerminal(prev.Status) { + by[e.ID] = e + } + } + out := make([]Entry, 0, len(by)) + for _, e := range by { + if statusFilter != "" && e.Status != statusFilter { + continue + } + out = append(out, e) + } + sort.Slice(out, func(i, j int) bool { + return out[i].StartedAt.After(out[j].StartedAt) + }) + return out, nil +} + +// Get returns the latest state of one job id. +func Get(id string) (Entry, bool, error) { + return latestByID(id) +} + +func latestByID(id string) (Entry, bool, error) { + all, err := readAll() + if err != nil { + return Entry{}, false, err + } + var found Entry + ok := false + for _, e := range all { + if e.ID != id { + continue + } + if !ok { + found = e + ok = true + continue + } + if isTerminal(e.Status) || !isTerminal(found.Status) { + found = e + } + } + return found, ok, nil +} + +func isTerminal(s string) bool { + return s == StatusComplete || s == StatusFailed +} + +// PruneOptions configures Prune. +type PruneOptions struct { + OlderThan time.Duration // entries with StartedAt older than now-OlderThan are removed + All bool // when true, removes everything regardless of OlderThan + DryRun bool // when true, no file is written + Now time.Time // optional clock override; zero value uses time.Now().UTC() +} + +// PruneResult summarises a prune call. 
+type PruneResult struct { + Pruned int + Kept int +} + +// Prune removes ledger entries by age (or all of them) and returns counts. +// In DryRun mode the ledger file is untouched. +// +// Pruning operates on the latest-state-per-id collapse: a job is kept iff its +// latest entry is younger than the cutoff. When kept, all of its entries are +// preserved to maintain the audit trail. +func Prune(opts PruneOptions) (PruneResult, error) { + all, err := readAll() + if err != nil { + return PruneResult{}, err + } + if len(all) == 0 { + return PruneResult{}, nil + } + + now := opts.Now + if now.IsZero() { + now = time.Now().UTC() + } + + // Collapse to latest state per id to decide which ids to keep. + latest := make(map[string]Entry) + for _, e := range all { + prev, ok := latest[e.ID] + if !ok || isTerminal(e.Status) || !isTerminal(prev.Status) { + latest[e.ID] = e + } + } + + keepIDs := make(map[string]bool, len(latest)) + prunedIDs := 0 + keptIDs := 0 + for id, e := range latest { + if opts.All { + prunedIDs++ + continue + } + ref := e.StartedAt + if e.CompletedAt != nil { + ref = *e.CompletedAt + } + if now.Sub(ref) > opts.OlderThan { + prunedIDs++ + continue + } + keepIDs[id] = true + keptIDs++ + } + + if opts.DryRun { + return PruneResult{Pruned: prunedIDs, Kept: keptIDs}, nil + } + + // Rewrite the file with only kept entries. 
+ path, err := Path() + if err != nil { + return PruneResult{}, err + } + if opts.All || keptIDs == 0 { + if err := os.Remove(path); err != nil && !os.IsNotExist(err) { + return PruneResult{}, err + } + return PruneResult{Pruned: prunedIDs, Kept: 0}, nil + } + + tmp := path + ".tmp" + f, err := os.Create(tmp) + if err != nil { + return PruneResult{}, err + } + enc := json.NewEncoder(f) + for _, e := range all { + if !keepIDs[e.ID] { + continue + } + if err := enc.Encode(e); err != nil { + f.Close() + os.Remove(tmp) + return PruneResult{}, err + } + } + if err := f.Close(); err != nil { + os.Remove(tmp) + return PruneResult{}, err + } + if err := os.Rename(tmp, path); err != nil { + os.Remove(tmp) + return PruneResult{}, err + } + return PruneResult{Pruned: prunedIDs, Kept: keptIDs}, nil +} + +// ParseDuration accepts the small set we surface to users: "7d", "24h", +// "30m", etc. Anything time.ParseDuration accepts is also accepted; the +// special "Nd" suffix converts to N*24h. +func ParseDuration(s string) (time.Duration, error) { + s = strings.TrimSpace(s) + if s == "" { + return 0, fmt.Errorf("duration is empty") + } + if strings.HasSuffix(s, "d") { + var days int + if _, err := fmt.Sscanf(strings.TrimSuffix(s, "d"), "%d", &days); err != nil { + return 0, fmt.Errorf("invalid duration %q: must be like 7d, 24h, 30m", s) + } + if days < 0 { + return 0, fmt.Errorf("duration must be non-negative: %q", s) + } + return time.Duration(days) * 24 * time.Hour, nil + } + d, err := time.ParseDuration(s) + if err != nil { + return 0, fmt.Errorf("invalid duration %q: must be like 7d, 24h, 30m", s) + } + return d, nil +} + +func newJobID() (string, error) { + var b [5]byte + if _, err := rand.Read(b[:]); err != nil { + return "", err + } + return "job_" + hex.EncodeToString(b[:]), nil +} diff --git a/internal/jobs/ledger_test.go b/internal/jobs/ledger_test.go new file mode 100644 index 0000000..bc87f88 --- /dev/null +++ b/internal/jobs/ledger_test.go @@ -0,0 +1,250 @@ +package 
jobs + +import ( + "errors" + "os" + "path/filepath" + "strings" + "testing" + "time" +) + +func setup(t *testing.T) string { + t.Helper() + dir := t.TempDir() + SetDir(dir) + t.Cleanup(func() { SetDir("") }) + return dir +} + +func TestSubmitCompleteRoundtrip(t *testing.T) { + dir := setup(t) + + id, err := Submit(Submission{Kind: "vm.run", Command: "vers run --wait", Args: []string{"--vm-alias", "x"}}) + if err != nil { + t.Fatalf("Submit: %v", err) + } + if !strings.HasPrefix(id, "job_") { + t.Fatalf("expected job_ prefix, got %q", id) + } + if err := Complete(id, "vm-abc"); err != nil { + t.Fatalf("Complete: %v", err) + } + + // File should have two lines. + data, err := os.ReadFile(filepath.Join(dir, "jobs.jsonl")) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + if got := strings.Count(strings.TrimSpace(string(data)), "\n") + 1; got != 2 { + t.Fatalf("expected 2 lines, got %d (%s)", got, data) + } + + got, ok, err := Get(id) + if err != nil || !ok { + t.Fatalf("Get: ok=%v err=%v", ok, err) + } + if got.Status != StatusComplete { + t.Fatalf("expected complete, got %q", got.Status) + } + if got.ResultID != "vm-abc" { + t.Fatalf("expected result id carried, got %q", got.ResultID) + } + if got.Kind != "vm.run" || got.Command != "vers run --wait" { + t.Fatalf("metadata not carried: %+v", got) + } + if got.DurationMs == nil { + t.Fatalf("expected duration to be set") + } +} + +func TestFail(t *testing.T) { + setup(t) + id, err := Submit(Submission{Kind: "vm.branch", Command: "vers branch --wait"}) + if err != nil { + t.Fatalf("Submit: %v", err) + } + if err := Fail(id, errors.New("boom")); err != nil { + t.Fatalf("Fail: %v", err) + } + got, ok, err := Get(id) + if err != nil || !ok { + t.Fatalf("Get: ok=%v err=%v", ok, err) + } + if got.Status != StatusFailed || got.Error != "boom" { + t.Fatalf("unexpected entry: %+v", got) + } +} + +func TestLatestCollapseAndOrder(t *testing.T) { + setup(t) + a, _ := Submit(Submission{Kind: "vm.run", Command: "vers run"}) + 
time.Sleep(2 * time.Millisecond) + b, _ := Submit(Submission{Kind: "vm.branch", Command: "vers branch"}) + time.Sleep(2 * time.Millisecond) + c, _ := Submit(Submission{Kind: "vm.deploy", Command: "vers deploy"}) + + if err := Complete(a, "vm-a"); err != nil { + t.Fatal(err) + } + if err := Fail(b, errors.New("nope")); err != nil { + t.Fatal(err) + } + // c stays submitted. + + all, err := Latest("") + if err != nil { + t.Fatalf("Latest: %v", err) + } + if len(all) != 3 { + t.Fatalf("expected 3 jobs, got %d", len(all)) + } + // Order: most-recent StartedAt first. + if all[0].ID != c || all[1].ID != b || all[2].ID != a { + t.Fatalf("order wrong: %+v", []string{all[0].ID, all[1].ID, all[2].ID}) + } + // Verify collapse: a should be complete, b failed, c submitted. + statusByID := map[string]string{} + for _, e := range all { + statusByID[e.ID] = e.Status + } + if statusByID[a] != StatusComplete || statusByID[b] != StatusFailed || statusByID[c] != StatusSubmitted { + t.Fatalf("collapse wrong: %+v", statusByID) + } + + // Filter by status. + failed, _ := Latest(StatusFailed) + if len(failed) != 1 || failed[0].ID != b { + t.Fatalf("status filter failed: %+v", failed) + } +} + +func TestPruneByAge(t *testing.T) { + dir := setup(t) + // Manually craft entries with old timestamps so Prune has something to remove. 
+ old := time.Now().UTC().Add(-30 * 24 * time.Hour) + oldDone := old.Add(time.Second) + dur := int64(1000) + mustAppend(t, Entry{ID: "job_old", Kind: "vm.run", Command: "vers run", StartedAt: old, Status: StatusSubmitted}) + mustAppend(t, Entry{ID: "job_old", Kind: "vm.run", Command: "vers run", StartedAt: old, CompletedAt: &oldDone, DurationMs: &dur, Status: StatusComplete, ResultID: "vm-old"}) + + freshID, err := Submit(Submission{Kind: "vm.branch", Command: "vers branch"}) + if err != nil { + t.Fatal(err) + } + if err := Complete(freshID, "vm-new"); err != nil { + t.Fatal(err) + } + + res, err := Prune(PruneOptions{OlderThan: 7 * 24 * time.Hour, DryRun: true}) + if err != nil { + t.Fatalf("Prune dry-run: %v", err) + } + if res.Pruned != 1 || res.Kept != 1 { + t.Fatalf("dry-run counts: %+v", res) + } + // Dry-run must not modify file. + before, _ := os.ReadFile(filepath.Join(dir, "jobs.jsonl")) + + res, err = Prune(PruneOptions{OlderThan: 7 * 24 * time.Hour}) + if err != nil { + t.Fatalf("Prune: %v", err) + } + if res.Pruned != 1 || res.Kept != 1 { + t.Fatalf("counts: %+v", res) + } + after, _ := os.ReadFile(filepath.Join(dir, "jobs.jsonl")) + if len(after) >= len(before) { + t.Fatalf("expected file to shrink: before=%d after=%d", len(before), len(after)) + } + + if _, ok, _ := Get("job_old"); ok { + t.Fatalf("expected job_old to be pruned") + } + if _, ok, _ := Get(freshID); !ok { + t.Fatalf("expected fresh job to survive prune") + } +} + +func TestPruneAll(t *testing.T) { + dir := setup(t) + id, _ := Submit(Submission{Kind: "vm.run", Command: "vers run"}) + _ = Complete(id, "vm-x") + + res, err := Prune(PruneOptions{All: true}) + if err != nil { + t.Fatalf("Prune all: %v", err) + } + if res.Kept != 0 || res.Pruned != 1 { + t.Fatalf("counts: %+v", res) + } + if _, err := os.Stat(filepath.Join(dir, "jobs.jsonl")); !os.IsNotExist(err) { + t.Fatalf("expected ledger removed, stat err=%v", err) + } +} + +func TestParseDuration(t *testing.T) { + cases := []struct { + 
in string + want time.Duration + err bool + }{ + {"7d", 7 * 24 * time.Hour, false}, + {"24h", 24 * time.Hour, false}, + {"30m", 30 * time.Minute, false}, + {"0d", 0, false}, + {"", 0, true}, + {"banana", 0, true}, + {"-1d", 0, true}, + } + for _, tc := range cases { + got, err := ParseDuration(tc.in) + if tc.err { + if err == nil { + t.Errorf("ParseDuration(%q) expected error, got %v", tc.in, got) + } + continue + } + if err != nil { + t.Errorf("ParseDuration(%q) unexpected error: %v", tc.in, err) + continue + } + if got != tc.want { + t.Errorf("ParseDuration(%q) = %v, want %v", tc.in, got, tc.want) + } + } +} + +func TestNoLedgerFile(t *testing.T) { + setup(t) + all, err := Latest("") + if err != nil { + t.Fatalf("Latest: %v", err) + } + if len(all) != 0 { + t.Fatalf("expected empty list, got %d", len(all)) + } + if _, ok, err := Get("job_missing"); err != nil || ok { + t.Fatalf("Get on empty ledger: ok=%v err=%v", ok, err) + } +} + +func TestEmptyIDIsNoop(t *testing.T) { + dir := setup(t) + if err := Complete("", ""); err != nil { + t.Fatal(err) + } + if err := Fail("", errors.New("x")); err != nil { + t.Fatal(err) + } + if _, err := os.Stat(filepath.Join(dir, "jobs.jsonl")); !os.IsNotExist(err) { + t.Fatalf("ledger should not have been created, stat err=%v", err) + } +} + +func mustAppend(t *testing.T, e Entry) { + t.Helper() + if err := Append(e); err != nil { + t.Fatalf("Append: %v", err) + } +}